Merge pull request #34573 from cyphar/dm-dos-prevention-remove-mountpoint

devicemapper: remove container rootfs mountPath after umount
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 58ed550..ea7ae05 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -1,4 +1,4 @@
-# Github code owners
+# GitHub code owners
 # See https://help.github.com/articles/about-codeowners/
 #
 # KEEP THIS FILE SORTED. Order is important. Last match takes precedence.
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
index 7362480..64459e8 100644
--- a/.github/ISSUE_TEMPLATE.md
+++ b/.github/ISSUE_TEMPLATE.md
@@ -10,19 +10,25 @@
 will, however, reopen it if you later provide the information.
 
 For more information about reporting issues, see
-https://github.com/docker/docker/blob/master/CONTRIBUTING.md#reporting-other-issues
+https://github.com/moby/moby/blob/master/CONTRIBUTING.md#reporting-other-issues
 
 ---------------------------------------------------
 GENERAL SUPPORT INFORMATION
 ---------------------------------------------------
 
 The GitHub issue tracker is for bug reports and feature requests.
-General support can be found at the following locations:
+General support for **docker** can be found at the following locations:
 
 - Docker Support Forums - https://forums.docker.com
-- IRC - irc.freenode.net #docker channel
+- Slack - community.docker.com #general channel
 - Post a question on StackOverflow, using the Docker tag
 
+General support for **moby** can be found at the following locations:
+
+- Moby Project Forums - https://forums.mobyproject.org
+- Slack - community.docker.com #moby-project channel
+- Post a question on StackOverflow, using the Moby tag
+
 ---------------------------------------------------
 BUG REPORT INFORMATION
 ---------------------------------------------------
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 4269818..fad7555 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,6 +1,6 @@
 <!--
 Please make sure you've read and understood our contributing guidelines;
-https://github.com/docker/docker/blob/master/CONTRIBUTING.md
+https://github.com/moby/moby/blob/master/CONTRIBUTING.md
 
 ** Make sure all your commits include a signature generated with `git commit -s` **
 
diff --git a/.mailmap b/.mailmap
index c48f290..928e817 100644
--- a/.mailmap
+++ b/.mailmap
@@ -10,6 +10,7 @@
 Shishir Mahajan <shishir.mahajan@redhat.com> <smahajan@redhat.com>
 Erwin van der Koogh <info@erronis.nl>
 Ahmed Kamal <email.ahmedkamal@googlemail.com>
+Alessandro Boch <aboch@tetrationanalytics.com> <aboch@docker.com>
 Tejesh Mehta <tejesh.mehta@gmail.com> <tj@init.me>
 Cristian Staretu <cristian.staretu@gmail.com>
 Cristian Staretu <cristian.staretu@gmail.com> <unclejacksons@gmail.com>
@@ -38,9 +39,14 @@
 Thatcher Peskens <thatcher@docker.com> dhrp <thatcher@gmx.net>
 Jérôme Petazzoni <jerome.petazzoni@docker.com> <jerome.petazzoni@dotcloud.com>
 Jérôme Petazzoni <jerome.petazzoni@docker.com> <jp@enix.org>
+Jérôme Petazzoni <jerome.petazzoni@docker.com> <jerome.petazzoni@gmail.com>
 Joffrey F <joffrey@docker.com>
 Joffrey F <joffrey@docker.com> <joffrey@dotcloud.com>
 Joffrey F <joffrey@docker.com> <f.joffrey@gmail.com>
+Kir Kolyshkin <kolyshkin@gmail.com> <kir@openvz.org>
+Kir Kolyshkin <kolyshkin@gmail.com>
+Lorenzo Fontana <lo@linux.com> <fontanalorenzo@me.com>
+<mr.wrfly@gmail.com> <wrfly@users.noreply.github.com>
 Tim Terhorst <mynamewastaken+git@gmail.com>
 Andy Smith <github@anarkystic.com>
 <kalessin@kalessin.fr> <louis@dotcloud.com>
@@ -93,6 +99,7 @@
 Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@home.org.au>
 Sven Dowideit <SvenDowideit@home.org.au> <SvenDowideit@users.noreply.github.com>
 Sven Dowideit <SvenDowideit@home.org.au> <sven@t440s.home.gateway>
+<info@fortinux.com> <fortinux@users.noreply.github.com>
 Akihiro Matsushima <amatsusbit@gmail.com> <amatsus@users.noreply.github.com>
 <alexl@redhat.com> <alexander.larsson@gmail.com>
 Alexander Morozov <lk4d4@docker.com> <lk4d4math@gmail.com>
@@ -166,6 +173,7 @@
 Deshi Xiao <dxiao@redhat.com> <xiaods@gmail.com>
 Doug Davis <dug@us.ibm.com> <duglin@users.noreply.github.com>
 Giampaolo Mancini <giampaolo@trampolineup.com>
+Hakan Özler <hakan.ozler@kodcu.com>
 K. Heller <pestophagous@gmail.com> <pestophagous@users.noreply.github.com>
 Jacob Atzen <jacob@jacobatzen.dk> <jatzen@gmail.com>
 Jeff Nickoloff <jeff.nickoloff@gmail.com> <jeff@allingeek.com>
@@ -184,15 +192,21 @@
 Mageee <fangpuyi@foxmail.com> <21521230.zju.edu.cn>
 Mansi Nahar <mmn4185@rit.edu> <mansinahar@users.noreply.github.com>
 Mansi Nahar <mmn4185@rit.edu> <mansi.nahar@macbookpro-mansinahar.local>
+Markus Kortlang <hyp3rdino@googlemail.com> <markus.kortlang@lhsystems.com>
 Mary Anthony <mary.anthony@docker.com> <mary@docker.com>
 Mary Anthony <mary.anthony@docker.com> moxiegirl <mary@docker.com>
 Mary Anthony <mary.anthony@docker.com> <moxieandmore@gmail.com>
-mattyw <mattyw@me.com> <gh@mattyw.net>
+Matt Schurenko <matt.schurenko@gmail.com>
+Matt Williams <mattyw@me.com> <gh@mattyw.net>
+Matt Williams <mattyw@me.com>
 Michael Spetsiotis <michael_spets@hotmail.com>
 Nik Nyby <nikolas@gnu.org> <nnyby@columbia.edu>
+Ouyang Liduo <oyld0210@163.com>
+Paul Liljenberg <liljenberg.paul@gmail.com> <letters@paulnotcom.se>
+Pawel Konczalski <mail@konczalski.de>
+Philip Alexander Etling <paetling@gmail.com>
 Peter Jaffe <pjaffe@nevo.com>
-resouer <resouer@163.com> <resouer@gmail.com>
-AJ Bowen <aj@gandi.net> soulshake <amy@gandi.net> 
+AJ Bowen <aj@gandi.net> soulshake <amy@gandi.net>
 AJ Bowen <aj@gandi.net> soulshake <aj@gandi.net>
 Tibor Vass <teabee89@gmail.com> <tibor@docker.com>
 Tibor Vass <teabee89@gmail.com> <tiborvass@users.noreply.github.com>
@@ -202,7 +216,8 @@
 John Howard (VM) <John.Howard@microsoft.com> jhowardmsft <jhoward@microsoft.com>
 Ankush Agarwal <ankushagarwal11@gmail.com> <ankushagarwal@users.noreply.github.com>
 Tangi COLIN <tangicolin@gmail.com> tangicolin <tangicolin@gmail.com>
-Allen Sun <allen.sun@daocloud.io>
+Allen Sun <allensun.shl@alibaba-inc.com> <allen.sun@daocloud.io>
+Allen Sun <allensun.shl@alibaba-inc.com> <shlallen1990@gmail.com>
 Adrien Gallouët <adrien@gallouet.fr> <angt@users.noreply.github.com>
 <aanm90@gmail.com> <martins@noironetworks.com>
 Anuj Bahuguna <anujbahuguna.dev@gmail.com>
@@ -218,7 +233,9 @@
 Daniel Nephin <dnephin@docker.com> <dnephin@gmail.com>
 Dave Tucker <dt@docker.com> <dave@dtucker.co.uk>
 Doug Tangren <d.tangren@gmail.com>
+Euan Kemp <euan.kemp@coreos.com> <euank@amazon.com>
 Frederick F. Kautz IV <fkautz@redhat.com> <fkautz@alumni.cmu.edu>
+Fengtu Wang <wangfengtu@huawei.com> <wangfengtu@huawei.com>
 Ben Golub <ben.golub@dotcloud.com>
 Harold Cooper <hrldcpr@gmail.com>
 hsinko <21551195@zju.edu.cn> <hsinko@users.noreply.github.com>
@@ -247,6 +264,7 @@
 <soshi.katsuta@gmail.com> <katsuta_soshi@cyberagent.co.jp>
 Stefan Berger <stefanb@linux.vnet.ibm.com>
 <stefanb@linux.vnet.ibm.com> <stefanb@us.ibm.com>
+Stefan J. Wernli <swernli@microsoft.com> <swernli@ntdev.microsoft.com>
 Stephen Day <stephen.day@docker.com>
 <stephen.day@docker.com> <stevvooe@users.noreply.github.com>
 Toli Kuznets <toli@docker.com>
@@ -292,9 +310,13 @@
 <nicholasjamesrusso@gmail.com> <nicholasrusso@icloud.com>
 Runshen Zhu <runshen.zhu@gmail.com>
 Tom Barlow <tomwbarlow@gmail.com>
+Tom Sweeney <tsweeney@redhat.com>
 Xianlu Bird <xianlubird@gmail.com>
 Dan Feldman <danf@jfrog.com>
+Harry Zhang <harryz@hyper.sh> <resouer@gmail.com>
 Harry Zhang <harryz@hyper.sh> <harryzhang@zju.edu.cn>
+Harry Zhang <harryz@hyper.sh> <resouer@163.com>
+Harry Zhang <resouer@163.com>
 Alex Chen <alexchenunix@gmail.com> alexchen <root@localhost.localdomain>
 Alex Ellis <alexellis2@gmail.com>
 Alicia Lauerman <alicia@eta.im> <allydevour@me.com>
@@ -307,6 +329,7 @@
 Chen Qiu <cheney-90@hotmail.com> <21321229@zju.edu.cn>
 Chris Dias <cdias@microsoft.com>
 Chris McKinnel <chris.mckinnel@tangentlabs.co.uk>
+Christopher Biscardi <biscarch@sketcht.com>
 CUI Wei <ghostplant@qq.com> cuiwei13 <cuiwei13@pku.edu.cn>
 Daniel Grunwell <mwgrunny@gmail.com>
 Daniel J Walsh <dwalsh@redhat.com>
@@ -316,6 +339,8 @@
 Elan Ruusamäe <glen@pld-linux.org> <glen@delfi.ee>
 Elan Ruusamäe <glen@pld-linux.org>
 Eric G. Noriega <enoriega@vizuri.com> <egnoriega@users.noreply.github.com>
+Eugen Krizo <eugen.krizo@gmail.com>
+Evgeny Shmarnev <shmarnev@gmail.com>
 Evelyn Xu <evelynhsu21@gmail.com>
 Felix Ruess <felix.ruess@gmail.com> <felix.ruess@roboception.de>
 Gabriel Nicolas Avellaneda <avellaneda.gabriel@gmail.com>
@@ -323,6 +348,7 @@
 George Kontridze <george@bugsnag.com>
 Gopikannan Venugopalsamy <gopikannan.venugopalsamy@gmail.com>
 Gou Rao <gou@portworx.com> <gourao@users.noreply.github.com>
+Greg Stephens <greg@udon.org>
 Gustav Sinder <gustav.sinder@gmail.com>
 Harshal Patil <harshal.patil@in.ibm.com> <harche@users.noreply.github.com>
 Helen Xie <chenjg@harmonycloud.cn>
@@ -340,8 +366,10 @@
 Konstantin Gribov <grossws@gmail.com>
 Kunal Kushwaha <kushwaha_kunal_v7@lab.ntt.co.jp> <kunal.kushwaha@gmail.com>
 Lajos Papp <lajos.papp@sequenceiq.com> <lalyos@yahoo.com>
+Liang Mingqiang <mqliang.zju@gmail.com>
 Lyn <energylyn@zju.edu.cn>
 Markan Patel <mpatel678@gmail.com>
+Matthew Mosesohn <raytrac3r@gmail.com>
 Michael Käufl <docker@c.michael-kaeufl.de> <michael-k@users.noreply.github.com>
 Michal Minář <miminar@redhat.com>
 Michael Hudson-Doyle <michael.hudson@canonical.com> <michael.hudson@linaro.org>
@@ -349,6 +377,7 @@
 Milind Chawre <milindchawre@gmail.com>
 Ma Müller <mueller-ma@users.noreply.github.com>
 Moorthy RS <rsmoorthy@gmail.com> <rsmoorthy@users.noreply.github.com>
+Nace Oroz <orkica@gmail.com>
 Neil Horman <nhorman@tuxdriver.com> <nhorman@hmswarspite.think-freely.org>
 Pavel Tikhomirov <ptikhomirov@virtuozzo.com> <ptikhomirov@parallels.com>
 Peter Choi <phkchoi89@gmail.com> <reikani@Peters-MacBook-Pro.local>
@@ -358,7 +387,9 @@
 Roberto Muñoz Fernández <robertomf@gmail.com> <roberto.munoz.fernandez.contractor@bbva.com>
 Roman Dudin <katrmr@gmail.com> <decadent@users.noreply.github.com>
 Sandeep Bansal <sabansal@microsoft.com> <msabansal@microsoft.com>
+Sandeep Bansal <sabansal@microsoft.com>
 Sean Lee <seanlee@tw.ibm.com> <scaleoutsean@users.noreply.github.com>
+Shaun Kaasten <shaunk@gmail.com>
 Shukui Yang <yangshukui@huawei.com>
 Srinivasan Srivatsan <srinivasan.srivatsan@hpe.com> <srinsriv@users.noreply.github.com>
 Stefan S. <tronicum@user.github.com>
@@ -367,13 +398,17 @@
 Tim Bart <tim@fewagainstmany.com>
 Tim Zju <21651152@zju.edu.cn>
 Tõnis Tiigi <tonistiigi@gmail.com>
+Trishna Guha <trishnaguha17@gmail.com>
 Wayne Song <wsong@docker.com> <wsong@users.noreply.github.com>
+Wang Guoliang <liangcszzu@163.com>
 Wang Jie <wangjie5@chinaskycloud.com>
 Wang Ping <present.wp@icloud.com>
 Wang Yuexiao <wang.yuexiao@zte.com.cn>
+Wenjun Tang <tangwj2@lenovo.com> <dodia@163.com>
 Wewang Xiaorenfine <wang.xiaoren@zte.com.cn>
 Wei Wu <wuwei4455@gmail.com> cizixs <cizixs@163.com>
 Xiaoyu Zhang <zhang.xiaoyu33@zte.com.cn>
+Xuecong Liao <satorulogic@gmail.com>
 Yamasaki Masahide <masahide.y@gmail.com>
 Yassine Tijani <yasstij11@gmail.com>
 Ying Li <ying.li@docker.com> <cyli@twistedmatrix.com>
@@ -382,5 +417,6 @@
 Yu Peng <yu.peng36@zte.com.cn>
 Yu Peng <yu.peng36@zte.com.cn> <yupeng36@zte.com.cn>
 Yao Zaiyong <yaozaiyong@hotmail.com>
+ZhangHang <stevezhang2014@gmail.com>
 Zhenkun Bi <bi.zhenkun@zte.com.cn>
 Zhu Kunjia <zhu.kunjia@zte.com.cn>
diff --git a/AUTHORS b/AUTHORS
index e091ed7..f465a87 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -40,6 +40,7 @@
 AJ Bowen <aj@gandi.net>
 Ajey Charantimath <ajey.charantimath@gmail.com>
 ajneu <ajneu@users.noreply.github.com>
+Akash Gupta <akagup@microsoft.com>
 Akihiro Matsushima <amatsusbit@gmail.com>
 Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
 Akira Koyasu <mail@akirakoyasu.net>
@@ -53,7 +54,7 @@
 Aleksa Sarai <asarai@suse.de>
 Aleksandrs Fadins <aleks@s-ko.net>
 Alena Prokharchyk <alena@rancher.com>
-Alessandro Boch <aboch@docker.com>
+Alessandro Boch <aboch@tetrationanalytics.com>
 Alessio Biancalana <dottorblaster@gmail.com>
 Alex Chan <alex@alexwlchan.net>
 Alex Chen <alexchenunix@gmail.com>
@@ -83,7 +84,7 @@
 Alicia Lauerman <alicia@eta.im>
 Alihan Demir <alihan_6153@hotmail.com>
 Allen Madsen <blatyo@gmail.com>
-Allen Sun <allen.sun@daocloud.io>
+Allen Sun <allensun.shl@alibaba-inc.com>
 almoehi <almoehi@users.noreply.github.com>
 Alvaro Saurin <alvaro.saurin@gmail.com>
 Alvin Deng <alvin.q.deng@utexas.edu>
@@ -114,6 +115,7 @@
 Andrew France <andrew@avito.co.uk>
 Andrew Gerrand <adg@golang.org>
 Andrew Guenther <guenther.andrew.j@gmail.com>
+Andrew He <he.andrew.mail@gmail.com>
 Andrew Hsu <andrewhsu@docker.com>
 Andrew Kuklewicz <kookster@gmail.com>
 Andrew Macgregor <andrew.macgregor@agworld.com.au>
@@ -190,6 +192,7 @@
 Benjamin Boudreau <boudreau.benjamin@gmail.com>
 Benoit Chesneau <bchesneau@gmail.com>
 Bernerd Schaefer <bj.schaefer@gmail.com>
+Bernhard M. Wiedemann <bwiedemann@suse.de>
 Bert Goethals <bert@bertg.be>
 Bharath Thiruveedula <bharath_ves@hotmail.com>
 Bhiraj Butala <abhiraj.butala@gmail.com>
@@ -252,6 +255,7 @@
 Carl Henrik Lunde <chlunde@ping.uio.no>
 Carl Loa Odin <carlodin@gmail.com>
 Carl X. Su <bcbcarl@gmail.com>
+Carlo Mion <mion00@gmail.com>
 Carlos Alexandro Becker <caarlos0@gmail.com>
 Carlos Sanchez <carlos@apache.org>
 Carol Fager-Higgins <carol.fager-higgins@docker.com>
@@ -307,9 +311,11 @@
 Christian Rotzoll <ch.rotzoll@gmail.com>
 Christian Simon <simon@swine.de>
 Christian Stefanescu <st.chris@gmail.com>
-ChristoperBiscardi <biscarch@sketcht.com>
 Christophe Mehay <cmehay@online.net>
 Christophe Troestler <christophe.Troestler@umons.ac.be>
+Christophe Vidal <kriss@krizalys.com>
+Christopher Biscardi <biscarch@sketcht.com>
+Christopher Crone <christopher.crone@docker.com>
 Christopher Currie <codemonkey+github@gmail.com>
 Christopher Jones <tophj@linux.vnet.ibm.com>
 Christopher Latham <sudosurootdev@gmail.com>
@@ -473,6 +479,7 @@
 Doug Davis <dug@us.ibm.com>
 Doug MacEachern <dougm@vmware.com>
 Doug Tangren <d.tangren@gmail.com>
+Douglas Curtis <dougcurtis1@gmail.com>
 Dr Nic Williams <drnicwilliams@gmail.com>
 dragon788 <dragon788@users.noreply.github.com>
 Dražen Lučanin <kermit666@gmail.com>
@@ -486,6 +493,7 @@
 Eivind Uggedal <eivind@uggedal.com>
 Elan Ruusamäe <glen@pld-linux.org>
 Elena Morozova <lelenanam@gmail.com>
+Eli Uriegas <eli.uriegas@docker.com>
 Elias Faxö <elias.faxo@tre.se>
 Elias Probst <mail@eliasprobst.eu>
 Elijah Zupancic <elijah@zupancic.name>
@@ -520,10 +528,9 @@
 Erik Weathers <erikdw@gmail.com>
 Erno Hopearuoho <erno.hopearuoho@gmail.com>
 Erwin van der Koogh <info@erronis.nl>
-Euan <euank@amazon.com>
+Euan Kemp <euan.kemp@coreos.com>
+Eugen Krizo <eugen.krizo@gmail.com>
 Eugene Yakubovich <eugene.yakubovich@coreos.com>
-eugenkrizo <eugen.krizo@gmail.com>
-evalle <shmarnev@gmail.com>
 Evan Allrich <evan@unguku.com>
 Evan Carmi <carmi@users.noreply.github.com>
 Evan Hazlett <ehazlett@users.noreply.github.com>
@@ -533,6 +540,7 @@
 Evan Wies <evan@neomantra.net>
 Evelyn Xu <evelynhsu21@gmail.com>
 Everett Toews <everett.toews@rackspace.com>
+Evgeny Shmarnev <shmarnev@gmail.com>
 Evgeny Vereshchagin <evvers@ya.ru>
 Ewa Czechowska <ewa@ai-traders.com>
 Eystein Måløy Stenberg <eystein.maloy.stenberg@cfengine.com>
@@ -573,10 +581,12 @@
 Florian <FWirtz@users.noreply.github.com>
 Florian Klein <florian.klein@free.fr>
 Florian Maier <marsmensch@users.noreply.github.com>
+Florian Noeding <noeding@adobe.com>
 Florian Weingarten <flo@hackvalue.de>
 Florin Asavoaie <florin.asavoaie@gmail.com>
+Florin Patan <florinpatan@gmail.com>
 fonglh <fonglh@gmail.com>
-fortinux <fortinux@users.noreply.github.com>
+fortinux <info@fortinux.com>
 Foysal Iqbal <foysal.iqbal.fb@gmail.com>
 Francesc Campoy <campoy@google.com>
 Francis Chuang <francis.chuang@boostport.com>
@@ -638,6 +648,7 @@
 Graydon Hoare <graydon@pobox.com>
 Greg Fausak <greg@tacodata.com>
 Greg Pflaum <gpflaum@users.noreply.github.com>
+Greg Stephens <greg@udon.org>
 Greg Thornton <xdissent@me.com>
 Grzegorz Jaśkiewicz <gj.jaskiewicz@gmail.com>
 Guilhem Lettron <guilhem+github@lettron.fr>
@@ -650,6 +661,7 @@
 Gustav Sinder <gustav.sinder@gmail.com>
 gwx296173 <gaojing3@huawei.com>
 Günter Zöchbauer <guenter@gzoechbauer.com>
+Hakan Özler <hakan.ozler@kodcu.com>
 Hans Kristian Flaatten <hans@starefossen.com>
 Hans Rødtang <hansrodtang@gmail.com>
 Hao Shu Wei <haosw@cn.ibm.com>
@@ -681,8 +693,8 @@
 huqun <huqun@zju.edu.cn>
 Huu Nguyen <huu@prismskylabs.com>
 hyeongkyu.lee <hyeongkyu.lee@navercorp.com>
-hyp3rdino <markus.kortlang@lhsystems.com>
 Hyzhou Zhy <hyzhou.zhy@alibaba-inc.com>
+Iago López Galeiras <iago@kinvolk.io>
 Ian Babrou <ibobrik@gmail.com>
 Ian Bishop <ianbishop@pace7.com>
 Ian Bull <irbull@gmail.com>
@@ -962,7 +974,7 @@
 Kim BKC Carlbacker <kim.carlbacker@gmail.com>
 Kim Eik <kim@heldig.org>
 Kimbro Staken <kstaken@kstaken.com>
-Kir Kolyshkin <kir@openvz.org>
+Kir Kolyshkin <kolyshkin@gmail.com>
 Kiran Gangadharan <kiran.daredevil@gmail.com>
 Kirill Kolyshkin <kolyshkin@users.noreply.github.com>
 Kirill SIbirev <l0kix2@gmail.com>
@@ -1036,7 +1048,7 @@
 Lokesh Mandvekar <lsm5@fedoraproject.org>
 longliqiang88 <394564827@qq.com>
 Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
-Lorenzo Fontana <fontanalorenzo@me.com>
+Lorenzo Fontana <lo@linux.com>
 Louis Opter <kalessin@kalessin.fr>
 Luca Favatella <lucafavatella@users.noreply.github.com>
 Luca Marturana <lucamarturana@gmail.com>
@@ -1074,6 +1086,7 @@
 Marc Abramowitz <marc@marc-abramowitz.com>
 Marc Kuo <kuomarc2@gmail.com>
 Marc Tamsky <mtamsky@gmail.com>
+Marcel Edmund Franke <marcel.edmund.franke@gmail.com>
 Marcelo Salazar <chelosalazar@gmail.com>
 Marco Hennings <marco.hennings@freiheit.com>
 Marcus Cobden <mcobden@cisco.com>
@@ -1097,6 +1110,7 @@
 Marko Mikulicic <mmikulicic@gmail.com>
 Marko Tibold <marko@tibold.nl>
 Markus Fix <lispmeister@gmail.com>
+Markus Kortlang <hyp3rdino@googlemail.com>
 Martijn Dwars <ikben@martijndwars.nl>
 Martijn van Oosterhout <kleptog@svana.org>
 Martin Honermeyer <maze@strahlungsfrei.de>
@@ -1120,17 +1134,18 @@
 Matt Moore <mattmoor@google.com>
 Matt Richardson <matt@redgumtech.com.au>
 Matt Robenolt <matt@ydekproductions.com>
+Matt Schurenko <matt.schurenko@gmail.com>
+Matt Williams <mattyw@me.com>
 Matthew Heon <mheon@redhat.com>
 Matthew Lapworth <matthewl@bit-shift.net>
 Matthew Mayer <matthewkmayer@gmail.com>
+Matthew Mosesohn <raytrac3r@gmail.com>
 Matthew Mueller <mattmuelle@gmail.com>
 Matthew Riley <mattdr@google.com>
 Matthias Klumpp <matthias@tenstral.net>
 Matthias Kühnle <git.nivoc@neverbox.com>
 Matthias Rampke <mr@soundcloud.com>
 Matthieu Hauglustaine <matt.hauglustaine@gmail.com>
-mattymo <raytrac3r@gmail.com>
-mattyw <mattyw@me.com>
 Mauricio Garavaglia <mauricio@medallia.com>
 mauriyouth <mauriyouth@gmail.com>
 Max Shytikov <mshytikov@gmail.com>
@@ -1215,16 +1230,14 @@
 Morten Siebuhr <sbhr@sbhr.dk>
 Morton Fox <github@qslw.com>
 Moysés Borges <moysesb@gmail.com>
-mqliang <mqliang.zju@gmail.com>
+mrfly <mr.wrfly@gmail.com>
 Mrunal Patel <mrunalp@gmail.com>
-msabansal <sabansal@microsoft.com>
-mschurenko <matt.schurenko@gmail.com>
 Muayyad Alsadi <alsadi@gmail.com>
-muge <stevezhang2014@gmail.com>
 Mustafa Akın <mustafa91@gmail.com>
 Muthukumar R <muthur@gmail.com>
 Máximo Cuadros <mcuadros@gmail.com>
 Médi-Rémi Hashim <medimatrix@users.noreply.github.com>
+Nace Oroz <orkica@gmail.com>
 Nahum Shalman <nshalman@omniti.com>
 Nakul Pathak <nakulpathak3@hotmail.com>
 Nalin Dahyabhai <nalin@redhat.com>
@@ -1292,16 +1305,13 @@
 Olivier Gambier <dmp42@users.noreply.github.com>
 Olle Jonsson <olle.jonsson@gmail.com>
 Oriol Francès <oriolfa@gmail.com>
-orkaa <orkica@gmail.com>
 Oskar Niburski <oskarniburski@gmail.com>
 Otto Kekäläinen <otto@seravo.fi>
+Ouyang Liduo <oyld0210@163.com>
 Ovidio Mallo <ovidio.mallo@gmail.com>
-oyld <oyld0210@163.com>
-ozlerhakan <hakan.ozler@kodcu.com>
-paetling <paetling@gmail.com>
-pandrew <letters@paulnotcom.se>
-panticz <mail@konczalski.de>
+Panagiotis Moustafellos <pmoust@elastic.co>
 Paolo G. Giarrusso <p.giarrusso@gmail.com>
+Pascal <pascalgn@users.noreply.github.com>
 Pascal Borreli <pascal@borreli.com>
 Pascal Hartig <phartig@rdrei.net>
 Patrick Böänziger <patrick.baenziger@bsi-software.com>
@@ -1330,6 +1340,7 @@
 Pavel Tikhomirov <ptikhomirov@virtuozzo.com>
 Pavlos Ratis <dastergon@gentoo.org>
 Pavol Vargovcik <pallly.vargovcik@gmail.com>
+Pawel Konczalski <mail@konczalski.de>
 Peeyush Gupta <gpeeyush@linux.vnet.ibm.com>
 Peggy Li <peggyli.224@gmail.com>
 Pei Su <sillyousu@gmail.com>
@@ -1354,6 +1365,7 @@
 Phil <underscorephil@gmail.com>
 Phil Estes <estesp@linux.vnet.ibm.com>
 Phil Spitler <pspitler@gmail.com>
+Philip Alexander Etling <paetling@gmail.com>
 Philip Monroe <phil@philmonroe.com>
 Philipp Gillé <philipp.gille@gmail.com>
 Philipp Wahala <philipp.wahala@gmail.com>
@@ -1372,6 +1384,7 @@
 Porjo <porjo38@yahoo.com.au>
 Poul Kjeldager Sørensen <pks@s-innovations.net>
 Pradeep Chhetri <pradeep@indix.com>
+Pradip Dhara <pradipd@microsoft.com>
 Prasanna Gautam <prasannagautam@gmail.com>
 Pratik Karki <prertik@outlook.com>
 Prayag Verma <prayag.verma@gmail.com>
@@ -1407,8 +1420,7 @@
 Remi Rampin <remirampin@gmail.com>
 Remy Suen <remy.suen@gmail.com>
 Renato Riccieri Santos Zannon <renato.riccieri@gmail.com>
-resouer <resouer@163.com>
-rgstephens <greg@udon.org>
+Renaud Gaubert <rgaubert@nvidia.com>
 Rhys Hiltner <rhys@twitch.tv>
 Ricardo N Feliciano <FelicianoTech@gmail.com>
 Rich Moyse <rich@moyse.us>
@@ -1514,8 +1526,8 @@
 Sanket Saurav <sanketsaurav@gmail.com>
 Santhosh Manohar <santhosh@docker.com>
 sapphiredev <se.imas.kr@gmail.com>
+Sascha Andres <sascha.andres@outlook.com>
 Satnam Singh <satnam@raintown.org>
-satoru <satorulogic@gmail.com>
 Satoshi Amemiya <satoshi_amemiya@voyagegroup.com>
 Satoshi Tagomori <tagomoris@gmail.com>
 Scott Bessler <scottbessler@gmail.com>
@@ -1545,6 +1557,7 @@
 Sevki Hasirci <s@sevki.org>
 Shane Canon <scanon@lbl.gov>
 Shane da Silva <shane@dasilva.io>
+Shaun Kaasten <shaunk@gmail.com>
 shaunol <shaunol@gmail.com>
 Shawn Landden <shawn@churchofgit.com>
 Shawn Siefkas <shawn.siefkas@meredith.com>
@@ -1572,9 +1585,9 @@
 Simon Leinen <simon.leinen@gmail.com>
 Simon Menke <simon.menke@gmail.com>
 Simon Taranto <simon.taranto@gmail.com>
+Simon Vikstrom <pullreq@devsn.se>
 Sindhu S <sindhus@live.in>
 Sjoerd Langkemper <sjoerd-github@linuxonly.nl>
-skaasten <shaunk@gmail.com>
 Solganik Alexander <solganik@gmail.com>
 Solomon Hykes <solomon@docker.com>
 Song Gao <song@gao.io>
@@ -1621,6 +1634,7 @@
 Sylvain Baubeau <sbaubeau@redhat.com>
 Sylvain Bellemare <sylvain@ascribe.io>
 Sébastien <sebastien@yoozio.com>
+Sébastien HOUZÉ <cto@verylastroom.com>
 Sébastien Luttringer <seblu@seblu.net>
 Sébastien Stormacq <sebsto@users.noreply.github.com>
 Tabakhase <mail@tabakhase.com>
@@ -1656,6 +1670,7 @@
 Thomas Swift <tgs242@gmail.com>
 Thomas Tanaka <thomas.tanaka@oracle.com>
 Thomas Texier <sharkone@en-mousse.org>
+Ti Zhou <tizhou1986@gmail.com>
 Tianon Gravi <admwiggin@gmail.com>
 Tianyi Wang <capkurmagati@gmail.com>
 Tibor Vass <teabee89@gmail.com>
@@ -1696,6 +1711,7 @@
 Tom Howe <tom.howe@enstratius.com>
 Tom Hulihan <hulihan.tom159@gmail.com>
 Tom Maaswinkel <tom.maaswinkel@12wiki.eu>
+Tom Sweeney <tsweeney@redhat.com>
 Tom Wilkie <tom.wilkie@gmail.com>
 Tom X. Tobin <tomxtobin@tomxtobin.com>
 Tomas Tomecek <ttomecek@redhat.com>
@@ -1720,9 +1736,10 @@
 Trevor <trevinwoodstock@gmail.com>
 Trevor Pounds <trevor.pounds@gmail.com>
 Trevor Sullivan <pcgeek86@gmail.com>
-trishnaguha <trishnaguha17@gmail.com>
+Trishna Guha <trishnaguha17@gmail.com>
 Tristan Carel <tristan@cogniteev.com>
 Troy Denton <trdenton@gmail.com>
+Tycho Andersen <tycho@docker.com>
 Tyler Brock <tyler.brock@gmail.com>
 Tzu-Jung Lee <roylee17@gmail.com>
 uhayate <uhayate.gong@daocloud.io>
@@ -1771,6 +1788,7 @@
 Walter Leibbrandt <github@wrl.co.za>
 Walter Stanish <walter@pratyeka.org>
 WANG Chao <wcwxyz@gmail.com>
+Wang Guoliang <liangcszzu@163.com>
 Wang Jie <wangjie5@chinaskycloud.com>
 Wang Long <long.wanglong@huawei.com>
 Wang Ping <present.wp@icloud.com>
@@ -1786,6 +1804,7 @@
 Weiyang Zhu <cnresonant@gmail.com>
 Wen Cheng Ma <wenchma@cn.ibm.com>
 Wendel Fleming <wfleming@usc.edu>
+Wenjun Tang <tangwj2@lenovo.com>
 Wenkai Yin <yinw@vmware.com>
 Wentao Zhang <zhangwentao234@huawei.com>
 Wenxuan Zhao <viz@linux.com>
@@ -1819,6 +1838,7 @@
 Xinzi Zhou <imdreamrunner@gmail.com>
 Xiuming Chen <cc@cxm.cc>
 xlgao-zju <xlgao@zju.edu.cn>
+Xuecong Liao <satorulogic@gmail.com>
 xuzhaokui <cynicholas@gmail.com>
 Yahya <ya7yaz@gmail.com>
 YAMADA Tsuyoshi <tyamada@minimum2scp.org>
@@ -1826,6 +1846,7 @@
 Yan Feng <yanfeng2@huawei.com>
 Yang Bai <hamo.by@gmail.com>
 Yang Pengfei <yangpengfei4@huawei.com>
+yangchenliang <yangchenliang@huawei.com>
 Yanqiang Miao <miao.yanqiang@zte.com.cn>
 Yao Zaiyong <yaozaiyong@hotmail.com>
 Yassine Tijani <yasstij11@gmail.com>
@@ -1844,8 +1865,10 @@
 Yu Changchun <yuchangchun1@huawei.com>
 Yu Chengxia <yuchengxia@huawei.com>
 Yu Peng <yu.peng36@zte.com.cn>
+Yu-Ju Hong <yjhong@google.com>
 Yuan Sun <sunyuan3@huawei.com>
 Yuanhong Peng <pengyuanhong@huawei.com>
+Yuhao Fang <fangyuhao@gmail.com>
 Yunxiang Huang <hyxqshk@vip.qq.com>
 Yurii Rashkovskii <yrashk@gmail.com>
 yuzou <zouyu7@huawei.com>
@@ -1860,6 +1883,7 @@
 Zhang Kun <zkazure@gmail.com>
 Zhang Wei <zhangwei555@huawei.com>
 Zhang Wentao <zhangwentao234@huawei.com>
+ZhangHang <stevezhang2014@gmail.com>
 zhangxianwei <xianwei.zw@alibaba-inc.com>
 Zhenan Ye <21551168@zju.edu.cn>
 zhenghenghuo <zhenghenghuo@zju.edu.cn>
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index c55e13b..b7961e1 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,8 +1,8 @@
-# Contributing to Docker
+# Contribute to the Moby Project
 
-Want to hack on Docker? Awesome!  We have a contributor's guide that explains
-[setting up a Docker development environment and the contribution
-process](https://docs.docker.com/opensource/project/who-written-for/). 
+Want to hack on the Moby Project? Awesome! We have a contributor's guide that explains
+[setting up a development environment and the contribution
+process](docs/contributing/). 
 
 [![Contributors guide](docs/static_files/contributors.png)](https://docs.docker.com/opensource/project/who-written-for/)
 
@@ -21,14 +21,14 @@
 
 ## Reporting security issues
 
-The Docker maintainers take security seriously. If you discover a security
+The Moby maintainers take security seriously. If you discover a security
 issue, please bring it to their attention right away!
 
 Please **DO NOT** file a public issue, instead send your report privately to
 [security@docker.com](mailto:security@docker.com).
 
 Security reports are greatly appreciated and we will publicly thank you for it.
-We also like to send gifts&mdash;if you're into Docker schwag, make sure to let
+We also like to send gifts&mdash;if you're into schwag, make sure to let
 us know. We currently do not offer a paid security bounty program, but are not
 ruling it out in the future.
 
@@ -83,11 +83,7 @@
 section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in
 the contributors guide.
 
-We try hard to keep Docker lean and focused. Docker can't do everything for
-everybody. This means that we might decide against incorporating a new feature.
-However, there might be a way to implement that feature *on top of* Docker.
-
-### Talking to other Docker users and contributors
+### Connect with other Moby Project contributors
 
 <table class="tg">
   <col width="45%">
@@ -96,52 +92,29 @@
     <td>Forums</td>
     <td>
       A public forum for users to discuss questions and explore current design patterns and
-      best practices about Docker and related projects in the Docker Ecosystem. To participate,
-      just log in with your Docker Hub account on <a href="https://forums.docker.com" target="_blank">https://forums.docker.com</a>.
+      best practices about all the Moby projects. To participate, log in with your GitHub
+      account or create an account at <a href="https://forums.mobyproject.org" target="_blank">https://forums.mobyproject.org</a>.
     </td>
   </tr>
   <tr>
-    <td>Internet&nbsp;Relay&nbsp;Chat&nbsp;(IRC)</td>
+    <td>Slack</td>
     <td>
       <p>
-        IRC a direct line to our most knowledgeable Docker users; we have
-        both the  <code>#docker</code> and <code>#docker-dev</code> group on
-        <strong>irc.freenode.net</strong>.
-        IRC is a rich chat protocol but it can overwhelm new users. You can search
-        <a href="https://botbot.me/freenode/docker/#" target="_blank">our chat archives</a>.
+        Register for the Docker Community Slack at
+	<a href="https://community.docker.com/registrations/groups/4316" target="_blank">https://community.docker.com/registrations/groups/4316</a>.
+        We use the #moby-project channel for general discussion, and there are separate channels for other Moby projects such as #containerd.
+	Archives are available at <a href="https://dockercommunity.slackarchive.io/" target="_blank">https://dockercommunity.slackarchive.io/</a>.
       </p>
-      <p>
-        Read our <a href="https://docs.docker.com/opensource/get-help/#irc-quickstart" target="_blank">IRC quickstart guide</a>
-        for an easy way to get started.
-      </p>
-    </td>
-  </tr>
-  <tr>
-    <td>Google Group</td>
-    <td>
-      The <a href="https://groups.google.com/forum/#!forum/docker-dev" target="_blank">docker-dev</a>
-      group is for contributors and other people contributing to the Docker project.
-      You can join them without a google account by sending an email to 
-      <a href="mailto:docker-dev+subscribe@googlegroups.com">docker-dev+subscribe@googlegroups.com</a>.
-      After receiving the join-request message, you can simply reply to that to confirm the subscription.
     </td>
   </tr>
   <tr>
     <td>Twitter</td>
     <td>
-      You can follow <a href="https://twitter.com/docker/" target="_blank">Docker's Twitter feed</a>
+      You can follow <a href="https://twitter.com/moby/" target="_blank">Moby Project Twitter feed</a>
       to get updates on our products. You can also tweet us questions or just
       share blogs or stories.
     </td>
   </tr>
-  <tr>
-    <td>Stack Overflow</td>
-    <td>
-      Stack Overflow has thousands of Docker questions listed. We regularly
-      monitor <a href="https://stackoverflow.com/search?tab=newest&q=docker" target="_blank">Docker questions</a>
-      and so do many other knowledgeable Docker users.
-    </td>
-  </tr>
 </table>
 
 
@@ -159,7 +132,7 @@
 
 If your changes need integration tests, write them against the API. The `cli`
 integration tests are slowly either migrated to API tests or moved away as unit
-tests in `docker/cli` and end-to-end tests for docker.
+tests in `docker/cli` and end-to-end tests for Docker.
 
 Update the documentation when creating or modifying features. Test your
 documentation changes for clarity, concision, and correctness, as well as a
@@ -266,15 +239,11 @@
 
 ### Merge approval
 
-Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
-indicate acceptance.
+Moby maintainers use LGTM (Looks Good To Me) in comments on the code review to
+indicate acceptance, or use the GitHub review approval feature.
 
-A change requires LGTMs from an absolute majority of the maintainers of each
-component affected. For example, if a change affects `docs/` and `registry/`, it
-needs an absolute majority from the maintainers of `docs/` AND, separately, an
-absolute majority of the maintainers of `registry/`.
-
-For more details, see the [MAINTAINERS](MAINTAINERS) page.
+For an explanation of the review and approval process see the
+[REVIEWING](project/REVIEWING.md) page.
 
 ### Sign your work
 
@@ -342,9 +311,9 @@
 will have time to make yourself available. You don't have to be a
 maintainer to make a difference on the project!
 
-## Docker community guidelines
+## Moby community guidelines
 
-We want to keep the Docker community awesome, growing and collaborative. We need
+We want to keep the Moby community awesome, growing and collaborative. We need
 your help to keep it that way. To help with this we've come up with some general
 guidelines for the community as a whole:
 
diff --git a/Dockerfile b/Dockerfile
index 8971edf..25da313 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -23,7 +23,7 @@
 # the case. Therefore, you don't have to disable it anymore.
 #
 
-FROM debian:jessie
+FROM debian:stretch
 
 # allow replacing httpredir or deb mirror
 ARG APT_MIRROR=deb.debian.org
@@ -51,21 +51,29 @@
 	less \
 	libapparmor-dev \
 	libcap-dev \
+	libdevmapper-dev \
 	libnl-3-dev \
 	libprotobuf-c0-dev \
 	libprotobuf-dev \
-	libsystemd-journal-dev \
+	libseccomp-dev \
+	libsystemd-dev \
 	libtool \
+	libudev-dev \
 	mercurial \
 	net-tools \
 	pkg-config \
 	protobuf-compiler \
 	protobuf-c-compiler \
+	python-backports.ssl-match-hostname \
 	python-dev \
 	python-mock \
 	python-pip \
+	python-requests \
+	python-setuptools \
 	python-websocket \
+	python-wheel \
 	tar \
+	thin-provisioning-tools \
 	vim \
 	vim-common \
 	xfsprogs \
@@ -73,42 +81,12 @@
 	--no-install-recommends \
 	&& pip install awscli==1.10.15
 
-# Get lvm2 sources to build statically linked devmapper library
-ENV LVM2_VERSION 2.02.173
-RUN mkdir -p /usr/local/lvm2 \
-	&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
-		| tar -xzC /usr/local/lvm2 --strip-components=1
-
-# Compile and install (only the needed library)
-RUN cd /usr/local/lvm2 \
-	&& ./configure \
-		--build="$(gcc -print-multiarch)" \
-		--enable-static_link \
-		--enable-pkgconfig \
-	&& make -C include \
-	&& make -C libdm install_device-mapper
-
-# Install seccomp: the version shipped upstream is too old
-ENV SECCOMP_VERSION 2.3.2
-RUN set -x \
-	&& export SECCOMP_PATH="$(mktemp -d)" \
-	&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
-		| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
-	&& ( \
-		cd "$SECCOMP_PATH" \
-		&& ./configure --prefix=/usr/local \
-		&& make \
-		&& make install \
-		&& ldconfig \
-	) \
-	&& rm -rf "$SECCOMP_PATH"
-
 # Install Go
 # IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
 #            will need updating, to avoid errors. Ping #docker-maintainers on IRC
 #            with a heads-up.
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
 	| tar -xzC /usr/local
 
@@ -155,11 +133,8 @@
 	&& rm -rf "$GOPATH"
 
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
+ENV DOCKER_PY_COMMIT ca7a6132a418c32df6bb11ba9b2a8b9b2727227a
 # To run integration tests docker-pycreds is required.
-# Before running the integration tests conftest.py is
-# loaded which results in loads auth.py that
-# imports the docker-pycreds module.
 RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& cd /docker-py \
 	&& git checkout -q $DOCKER_PY_COMMIT \
@@ -216,3 +191,6 @@
 
 # Upload docker source
 COPY . /go/src/github.com/docker/docker
+
+# Options for hack/validate/gometalinter
+ENV GOMETALINTER_OPTS="--deadline 2m"
diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64
index 5f3b25c..97dff46 100644
--- a/Dockerfile.aarch64
+++ b/Dockerfile.aarch64
@@ -15,87 +15,69 @@
 # the case. Therefore, you don't have to disable it anymore.
 #
 
-FROM aarch64/ubuntu:xenial
+FROM arm64v8/debian:stretch
+
+# allow replacing httpredir or deb mirror
+ARG APT_MIRROR=deb.debian.org
+RUN sed -ri "s/(httpredir|deb).debian.org/$APT_MIRROR/g" /etc/apt/sources.list
 
 # Packaged dependencies
 RUN apt-get update && apt-get install -y \
 	apparmor \
+	apt-utils \
 	aufs-tools \
 	automake \
 	bash-completion \
+	bsdmainutils \
 	btrfs-tools \
 	build-essential \
 	cmake \
 	createrepo \
 	curl \
 	dpkg-sig \
-	g++ \
 	gcc \
 	git \
 	iptables \
 	jq \
+	less \
 	libapparmor-dev \
-	libc6-dev \
 	libcap-dev \
+	libdevmapper-dev \
+	libnl-3-dev \
+	libprotobuf-c0-dev \
+	libprotobuf-dev \
+	libseccomp-dev \
 	libsystemd-dev \
-	libyaml-dev \
+	libtool \
+	libudev-dev \
 	mercurial \
 	net-tools \
-	parallel \
 	pkg-config \
+	protobuf-compiler \
+	protobuf-c-compiler \
+	python-backports.ssl-match-hostname \
 	python-dev \
 	python-mock \
 	python-pip \
+	python-requests \
 	python-setuptools \
 	python-websocket \
-	golang-go \
-	iproute2 \
-	iputils-ping \
+	python-wheel \
+	tar \
+	thin-provisioning-tools \
+	vim \
 	vim-common \
+	xfsprogs \
+	zip \
 	--no-install-recommends
 
-# Get lvm2 sources to build statically linked devmapper library
-ENV LVM2_VERSION 2.02.173
-RUN mkdir -p /usr/local/lvm2 \
-	&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
-		| tar -xzC /usr/local/lvm2 --strip-components=1
-
-# Compile and install (only the needed library)
-RUN cd /usr/local/lvm2 \
-	&& ./configure \
-		--build="$(gcc -print-multiarch)" \
-		--enable-static_link \
-		--enable-pkgconfig \
-	&& make -C include \
-	&& make -C libdm install_device-mapper
-
-# Install seccomp: the version shipped upstream is too old
-ENV SECCOMP_VERSION 2.3.2
-RUN set -x \
-	&& export SECCOMP_PATH="$(mktemp -d)" \
-	&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
-		| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
-	&& ( \
-		cd "$SECCOMP_PATH" \
-		&& ./configure --prefix=/usr/local \
-		&& make \
-		&& make install \
-		&& ldconfig \
-	) \
-	&& rm -rf "$SECCOMP_PATH"
-
 # Install Go
-# We don't have official binary golang 1.7.5 tarballs for ARM64, either for Go or
-# bootstrap, so we use golang-go (1.6) as bootstrap to build Go from source code.
-# We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because
-# not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8.
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.8.3
-RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
-	&& cd /usr/src/go/src \
-	&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
+ENV GO_VERSION 1.8.5
+RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-arm64.tar.gz" \
+	| tar -xzC /usr/local
 
-ENV PATH /go/bin:/usr/src/go/bin:$PATH
+ENV PATH /go/bin:/usr/local/go/bin:$PATH
 ENV GOPATH /go
 
 # Only install one version of the registry, because old version which support
@@ -123,14 +105,11 @@
 	&& rm -rf "$GOPATH"
 
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
-# Before running the integration tests conftest.py is
-# loaded which results in loads auth.py that
-# imports the docker-pycreds module.
+ENV DOCKER_PY_COMMIT ca7a6132a418c32df6bb11ba9b2a8b9b2727227a
+# To run integration tests docker-pycreds is required.
 RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& cd /docker-py \
 	&& git checkout -q $DOCKER_PY_COMMIT \
-	&& pip install wheel \
 	&& pip install docker-pycreds==0.2.1 \
 	&& pip install -r test-requirements.txt
 
@@ -173,7 +152,7 @@
 # Please edit hack/dockerfile/install-binaries.sh to update them.
 COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
 COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
-RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli gometalinter
 ENV PATH=/usr/local/cli:$PATH
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
@@ -181,3 +160,6 @@
 
 # Upload docker source
 COPY . /go/src/github.com/docker/docker
+
+# Options for hack/validate/gometalinter
+ENV GOMETALINTER_OPTS="--deadline 4m -j2"
diff --git a/Dockerfile.armhf b/Dockerfile.armhf
index 18cd74f..30ddf8c 100644
--- a/Dockerfile.armhf
+++ b/Dockerfile.armhf
@@ -15,7 +15,7 @@
 # the case. Therefore, you don't have to disable it anymore.
 #
 
-FROM armhf/debian:jessie
+FROM arm32v7/debian:stretch
 
 # allow replacing httpredir or deb mirror
 ARG APT_MIRROR=deb.debian.org
@@ -39,39 +39,31 @@
 	net-tools \
 	libapparmor-dev \
 	libcap-dev \
-	libsystemd-journal-dev \
+	libdevmapper-dev \
+	libseccomp-dev \
+	libsystemd-dev \
 	libtool \
+	libudev-dev \
 	mercurial \
 	pkg-config \
+	python-backports.ssl-match-hostname \
 	python-dev \
 	python-mock \
 	python-pip \
+	python-requests \
+	python-setuptools \
 	python-websocket \
+	python-wheel \
 	xfsprogs \
 	tar \
+	thin-provisioning-tools \
 	vim-common \
 	--no-install-recommends \
 	&& pip install awscli==1.10.15
 
-# Get lvm2 sources to build statically linked devmapper library
-ENV LVM2_VERSION 2.02.173
-RUN mkdir -p /usr/local/lvm2 \
-	&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
-		| tar -xzC /usr/local/lvm2 --strip-components=1
-
-# Compile and install (only the needed library)
-RUN cd /usr/local/lvm2 \
-	&& ./configure \
-		--build="$(gcc -print-multiarch)" \
-		--enable-static_link \
-		--enable-pkgconfig \
-	&& make -C include \
-	&& make -C libdm install_device-mapper
-
-
 # Install Go
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \
 	| tar -xzC /usr/local
 ENV PATH /go/bin:/usr/local/go/bin:$PATH
@@ -81,21 +73,6 @@
 ENV GOARCH arm
 ENV GOARM 7
 
-# Install seccomp: the version shipped upstream is too old
-ENV SECCOMP_VERSION 2.3.2
-RUN set -x \
-	&& export SECCOMP_PATH="$(mktemp -d)" \
-	&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
-		| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
-	&& ( \
-		cd "$SECCOMP_PATH" \
-		&& ./configure --prefix=/usr/local \
-		&& make \
-		&& make install \
-		&& ldconfig \
-	) \
-	&& rm -rf "$SECCOMP_PATH"
-
 # Install two versions of the registry. The first is an older version that
 # only supports schema1 manifests. The second is a newer version that supports
 # both. This allows integration-cli tests to cover push/pull with both schema1
@@ -126,10 +103,12 @@
 	&& rm -rf "$GOPATH"
 
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
+ENV DOCKER_PY_COMMIT ca7a6132a418c32df6bb11ba9b2a8b9b2727227a
+# To run integration tests docker-pycreds is required.
 RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& cd /docker-py \
 	&& git checkout -q $DOCKER_PY_COMMIT \
+	&& pip install docker-pycreds==0.2.1 \
 	&& pip install -r test-requirements.txt
 
 # Set user.email so crosbymichael's in-container merge commits go smoothly
@@ -162,10 +141,13 @@
 # Please edit hack/dockerfile/install-binaries.sh to update them.
 COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
 COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
-RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli gometalinter
 ENV PATH=/usr/local/cli:$PATH
 
 ENTRYPOINT ["hack/dind"]
 
 # Upload docker source
 COPY . /go/src/github.com/docker/docker
+
+# Options for hack/validate/gometalinter
+ENV GOMETALINTER_OPTS="--deadline 10m -j2"
diff --git a/Dockerfile.e2e b/Dockerfile.e2e
new file mode 100644
index 0000000..c57d7d3
--- /dev/null
+++ b/Dockerfile.e2e
@@ -0,0 +1,71 @@
+## Step 1: Build tests
+FROM golang:1.8.5-alpine3.6 as builder
+
+RUN apk add --update \
+    bash \
+    btrfs-progs-dev \
+    build-base \
+    curl \
+    lvm2-dev \
+    jq \
+    && rm -rf /var/cache/apk/*
+
+RUN mkdir -p /go/src/github.com/docker/docker/
+WORKDIR /go/src/github.com/docker/docker/
+
+# Generate frozen images
+COPY contrib/download-frozen-image-v2.sh contrib/download-frozen-image-v2.sh
+RUN contrib/download-frozen-image-v2.sh /output/docker-frozen-images \
+  buildpack-deps:jessie@sha256:85b379ec16065e4fe4127eb1c5fb1bcc03c559bd36dbb2e22ff496de55925fa6 \
+  busybox:latest@sha256:32f093055929dbc23dec4d03e09dfe971f5973a9ca5cf059cbfb644c206aa83f \
+  debian:jessie@sha256:72f784399fd2719b4cb4e16ef8e369a39dc67f53d978cd3e2e7bf4e502c7b793 \
+  hello-world:latest@sha256:c5515758d4c5e1e838e9cd307f6c6a0d620b5e07e6f927b07d05f6d12a1ac8d7
+
+# Download Docker CLI binary
+COPY hack/dockerfile hack/dockerfile
+RUN hack/dockerfile/install-binaries.sh dockercli
+
+# Set tag and add sources
+ARG DOCKER_GITCOMMIT
+ENV DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT
+ADD . .
+
+# Build DockerSuite.TestBuild* dependency
+RUN CGO_ENABLED=0 go build -o /output/httpserver github.com/docker/docker/contrib/httpserver
+
+# Build the integration tests and copy the resulting binaries to /output/tests
+RUN hack/make.sh build-integration-test-binary
+RUN mkdir -p /output/tests && find . -name test.main -exec cp --parents '{}' /output/tests \;
+
+## Step 2: Generate testing image
+FROM alpine:3.6 as runner
+
+# GNU tar is used for generating the emptyfs image
+RUN apk add --update \
+    bash \
+    ca-certificates \
+    g++ \
+    git \
+    iptables \
+    tar \
+    xz \
+    && rm -rf /var/cache/apk/*
+
+# Add an unprivileged user to be used for tests which need it
+RUN addgroup docker && adduser -D -G docker unprivilegeduser -s /bin/ash
+
+COPY contrib/httpserver/Dockerfile /tests/contrib/httpserver/Dockerfile
+COPY contrib/syscall-test /tests/contrib/syscall-test
+COPY integration-cli/fixtures /tests/integration-cli/fixtures
+
+COPY hack/test/e2e-run.sh /scripts/run.sh
+COPY hack/make/.ensure-emptyfs /scripts/ensure-emptyfs.sh
+
+COPY --from=builder /output/docker-frozen-images /docker-frozen-images
+COPY --from=builder /output/httpserver /tests/contrib/httpserver/httpserver
+COPY --from=builder /output/tests /tests
+COPY --from=builder /usr/local/bin/docker /usr/bin/docker
+
+ENV DOCKER_REMOTE_DAEMON=1 DOCKER_INTEGRATION_DAEMON_DEST=/
+
+ENTRYPOINT ["/scripts/run.sh"]
diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le
index 3234239..709a6e6 100644
--- a/Dockerfile.ppc64le
+++ b/Dockerfile.ppc64le
@@ -15,7 +15,7 @@
 # the case. Therefore, you don't have to disable it anymore.
 #
 
-FROM ppc64le/debian:jessie
+FROM ppc64le/debian:stretch
 
 # allow replacing httpredir or deb mirror
 ARG APT_MIRROR=deb.debian.org
@@ -40,54 +40,31 @@
 	net-tools \
 	libapparmor-dev \
 	libcap-dev \
-	libsystemd-journal-dev \
+	libdevmapper-dev \
+	libseccomp-dev \
+	libsystemd-dev \
 	libtool \
+	libudev-dev \
 	mercurial \
 	pkg-config \
+	python-backports.ssl-match-hostname \
 	python-dev \
 	python-mock \
 	python-pip \
+	python-requests \
+	python-setuptools \
 	python-websocket \
+	python-wheel \
 	xfsprogs \
 	tar \
+	thin-provisioning-tools \
 	vim-common \
 	--no-install-recommends
 
-# Get lvm2 sources to build statically linked devmapper library
-ENV LVM2_VERSION 2.02.173
-RUN mkdir -p /usr/local/lvm2 \
-	&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
-		| tar -xzC /usr/local/lvm2 --strip-components=1
-
-# Compile and install (only the needed library)
-RUN cd /usr/local/lvm2 \
-	&& ./configure \
-		--build="$(gcc -print-multiarch)" \
-		--enable-static_link \
-		--enable-pkgconfig \
-	&& make -C include \
-	&& make -C libdm install_device-mapper
-
-# Install seccomp: the version shipped upstream is too old
-ENV SECCOMP_VERSION 2.3.2
-RUN set -x \
-        && export SECCOMP_PATH="$(mktemp -d)" \
-        && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
-                | tar -xzC "$SECCOMP_PATH" --strip-components=1 \
-        && ( \
-                cd "$SECCOMP_PATH" \
-                && ./configure --prefix=/usr/local \
-                && make \
-                && make install \
-                && ldconfig \
-        ) \
-        && rm -rf "$SECCOMP_PATH"
-
-
 # Install Go
 # NOTE: official ppc64le go binaries weren't available until go 1.6.4 and 1.7.4
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" \
 	| tar -xzC /usr/local
 
@@ -124,10 +101,12 @@
 	&& rm -rf "$GOPATH"
 
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
+ENV DOCKER_PY_COMMIT ca7a6132a418c32df6bb11ba9b2a8b9b2727227a
+# To run integration tests docker-pycreds is required.
 RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& cd /docker-py \
 	&& git checkout -q $DOCKER_PY_COMMIT \
+	&& pip install docker-pycreds==0.2.1 \
 	&& pip install -r test-requirements.txt
 
 # Set user.email so crosbymichael's in-container merge commits go smoothly
@@ -160,7 +139,7 @@
 # Please edit hack/dockerfile/install-binaries.sh to update them.
 COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
 COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
-RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli gometalinter
 ENV PATH=/usr/local/cli:$PATH
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
diff --git a/Dockerfile.s390x b/Dockerfile.s390x
index 14dfd12..752d052 100644
--- a/Dockerfile.s390x
+++ b/Dockerfile.s390x
@@ -15,7 +15,7 @@
 # the case. Therefore, you don't have to disable it anymore.
 #
 
-FROM s390x/debian:jessie
+FROM s390x/debian:stretch
 
 # Packaged dependencies
 RUN apt-get update && apt-get install -y \
@@ -36,51 +36,29 @@
 	net-tools \
 	libapparmor-dev \
 	libcap-dev \
-	libsystemd-journal-dev \
+	libdevmapper-dev \
+	libseccomp-dev \
+	libsystemd-dev \
 	libtool \
+	libudev-dev \
 	mercurial \
 	pkg-config \
+	python-backports.ssl-match-hostname \
 	python-dev \
 	python-mock \
 	python-pip \
+	python-requests \
+	python-setuptools \
 	python-websocket \
+	python-wheel \
 	xfsprogs \
 	tar \
+	thin-provisioning-tools \
 	vim-common \
 	--no-install-recommends
 
-# Install seccomp: the version shipped upstream is too old
-ENV SECCOMP_VERSION 2.3.2
-RUN set -x \
-	&& export SECCOMP_PATH="$(mktemp -d)" \
-	&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
-		| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
-	&& ( \
-		cd "$SECCOMP_PATH" \
-		&& ./configure --prefix=/usr/local \
-		&& make \
-		&& make install \
-		&& ldconfig \
-	) \
-	&& rm -rf "$SECCOMP_PATH"
-
-# Get lvm2 sources to build statically linked devmapper library
-ENV LVM2_VERSION 2.02.173
-RUN mkdir -p /usr/local/lvm2 \
-	&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
-		| tar -xzC /usr/local/lvm2 --strip-components=1
-
-# Compile and install (only the needed library)
-RUN cd /usr/local/lvm2 \
-	&& ./configure \
-		--build="$(gcc -print-multiarch)" \
-		--enable-static_link \
-		--enable-pkgconfig \
-	&& make -C include \
-	&& make -C libdm install_device-mapper
-
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \
 	| tar -xzC /usr/local
 
@@ -117,10 +95,12 @@
 	&& rm -rf "$GOPATH"
 
 # Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT a962578e515185cf06506050b2200c0b81aa84ef
+ENV DOCKER_PY_COMMIT ca7a6132a418c32df6bb11ba9b2a8b9b2727227a
+# To run integration tests docker-pycreds is required.
 RUN git clone https://github.com/docker/docker-py.git /docker-py \
 	&& cd /docker-py \
 	&& git checkout -q $DOCKER_PY_COMMIT \
+	&& pip install docker-pycreds==0.2.1 \
 	&& pip install -r test-requirements.txt
 
 # Set user.email so crosbymichael's in-container merge commits go smoothly
@@ -153,7 +133,7 @@
 # Please edit hack/dockerfile/install-binaries.sh to update them.
 COPY hack/dockerfile/binaries-commits /tmp/binaries-commits
 COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh
-RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli
+RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy dockercli gometalinter
 ENV PATH=/usr/local/cli:$PATH
 
 # Wrap all commands in the "docker-in-docker" script to allow nested containers
diff --git a/Dockerfile.simple b/Dockerfile.simple
index f84f3c5..c84025e 100644
--- a/Dockerfile.simple
+++ b/Dockerfile.simple
@@ -5,7 +5,7 @@
 
 # This represents the bare minimum required to build and test Docker.
 
-FROM debian:jessie
+FROM debian:stretch
 
 # allow replacing httpredir or deb mirror
 ARG APT_MIRROR=deb.debian.org
@@ -23,6 +23,7 @@
 		git \
 		libapparmor-dev \
 		libdevmapper-dev \
+		libseccomp-dev \
 		ca-certificates \
 		e2fsprogs \
 		iptables \
@@ -34,27 +35,12 @@
 		vim-common \
 	&& rm -rf /var/lib/apt/lists/*
 
-# Install seccomp: the version shipped upstream is too old
-ENV SECCOMP_VERSION 2.3.2
-RUN set -x \
-	&& export SECCOMP_PATH="$(mktemp -d)" \
-	&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
-		| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
-	&& ( \
-		cd "$SECCOMP_PATH" \
-		&& ./configure --prefix=/usr/local \
-		&& make \
-		&& make install \
-		&& ldconfig \
-	) \
-	&& rm -rf "$SECCOMP_PATH"
-
 # Install Go
 # IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines
 #            will need updating, to avoid errors. Ping #docker-maintainers on IRC
 #            with a heads-up.
 # IMPORTANT: When updating this please note that stdlib archive/tar pkg is vendored
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \
 	| tar -xzC /usr/local
 ENV PATH /go/bin:/usr/local/go/bin:$PATH
diff --git a/Dockerfile.solaris b/Dockerfile.solaris
deleted file mode 100644
index 4198b13..0000000
--- a/Dockerfile.solaris
+++ /dev/null
@@ -1,19 +0,0 @@
-# Defines an image that hosts a native Docker build environment for Solaris
-# TODO: Improve stub
-
-FROM solaris:latest
-
-# compile and runtime deps
-RUN pkg install --accept \
-		git \
-		gnu-coreutils \
-		gnu-make \
-		gnu-tar \
-		diagnostic/top \
-		golang \
-		library/golang/* \
-		developer/gcc-*
-
-ENV GOPATH /go/:/usr/lib/gocode/1.5/
-WORKDIR /go/src/github.com/docker/docker
-COPY . /go/src/github.com/docker/docker
diff --git a/Dockerfile.windows b/Dockerfile.windows
index 0c62c94..2671aea 100644
--- a/Dockerfile.windows
+++ b/Dockerfile.windows
@@ -161,7 +161,7 @@
 # Environment variable notes:
 #  - GO_VERSION must be consistent with 'Dockerfile' used by Linux.
 #  - FROM_DOCKERFILE is used for detection of building within a container.
-ENV GO_VERSION=1.8.3 `
+ENV GO_VERSION=1.8.5 `
     GIT_VERSION=2.11.1 `
     GOPATH=C:\go `
     FROM_DOCKERFILE=1
diff --git a/MAINTAINERS b/MAINTAINERS
index 9b415b2..d81caba 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1,4 +1,4 @@
-# Docker maintainers file
+# Moby maintainers file
 #
 # This file describes who runs the docker/docker project and how.
 # This is a living document - if you see something out of date or missing, speak up!
@@ -264,7 +264,7 @@
 	[people.cpuguy83]
 	Name = "Brian Goff"
 	Email = "cpuguy83@gmail.com"
-	Github = "cpuguy83"
+	GitHub = "cpuguy83"
 
 	[people.chanwit]
 	Name = "Chanwit Kaewkasi"
@@ -364,7 +364,7 @@
 	[people.misty]
 	Name = "Misty Stanley-Jones"
 	Email = "misty@docker.com"
-	GitHub = "mstanleyjones"
+	GitHub = "mistyhacks"
 
 	[people.mlaventure]
 	Name = "Kenfe-Mickaël Laventure"
diff --git a/Makefile b/Makefile
index d9fb68b..7d50afa 100644
--- a/Makefile
+++ b/Makefile
@@ -1,4 +1,4 @@
-.PHONY: all binary dynbinary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration test-unit tgz validate win
+.PHONY: all binary dynbinary build cross deb help init-go-pkg-cache install manpages rpm run shell test test-docker-py test-integration test-unit validate win
 
 # set the graph driver as the current graphdriver if not set
 DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //'))
@@ -164,9 +164,6 @@
 test-unit: build ## run the unit tests
 	$(DOCKER_RUN_DOCKER) hack/test/unit
 
-tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containing the binaries
-	$(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz
-
 validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor
 	$(DOCKER_RUN_DOCKER) hack/validate/all
 
diff --git a/README.md b/README.md
index 533d771..534fd97 100644
--- a/README.md
+++ b/README.md
@@ -1,70 +1,38 @@
-### Docker users, see [Moby and Docker](https://mobyproject.org/#moby-and-docker) to clarify the relationship between the projects
-
-### Docker maintainers and contributors, see [Transitioning to Moby](#transitioning-to-moby) for more details
-
 The Moby Project
 ================
 
 ![Moby Project logo](docs/static_files/moby-project-logo.png "The Moby Project")
 
-Moby is an open-source project created by Docker to advance the software containerization movement.
-It provides a “Lego set” of dozens of components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts to experiment and exchange ideas.
+Moby is an open-source project created by Docker to enable and accelerate software containerization.
 
-# Moby
-
-## Overview
-
-At the core of Moby is a framework to assemble specialized container systems.
-It provides:
-
-- A library of containerized components for all vital aspects of a container system: OS, container runtime, orchestration, infrastructure management, networking, storage, security, build, image distribution, etc.
-- Tools to assemble the components into runnable artifacts for a variety of platforms and architectures: bare metal (both x86 and Arm); executables for Linux, Mac and Windows; VM images for popular cloud and virtualization providers.
-- A set of reference assemblies which can be used as-is, modified, or used as inspiration to create your own.
-
-All Moby components are containers, so creating new components is as easy as building a new OCI-compatible container.
+It provides a "Lego set" of toolkit components, the framework for assembling them into custom container-based systems, and a place for all container enthusiasts and professionals to experiment and exchange ideas.
+Components include container build tools, a container registry, orchestration tools, a runtime and more, and these can be used as building blocks in conjunction with other tools and projects.
 
 ## Principles
 
-Moby is an open project guided by strong principles, but modular, flexible and without too strong an opinion on user experience, so it is open to the community to help set its direction.
-The guiding principles are:
+Moby is an open project guided by strong principles, aiming to be modular, flexible and without too strong an opinion on user experience.
+It is open to the community to help set its direction.
 
+- Modular: the project includes lots of components that have well-defined functions and APIs that work together.
 - Batteries included but swappable: Moby includes enough components to build fully featured container system, but its modular architecture ensures that most of the components can be swapped by different implementations.
-- Usable security: Moby will provide secure defaults without compromising usability.
-- Container centric: Moby is built with containers, for running containers.
-
-With Moby, you should be able to describe all the components of your distributed application, from the high-level configuration files down to the kernel you would like to use and build and deploy it easily.
-
-Moby uses [containerd](https://github.com/containerd/containerd) as the default container runtime.
+- Usable security: Moby provides secure defaults without compromising usability.
+- Developer focused: The APIs are intended to be functional and useful to build powerful tools.
+They are not necessarily intended as end user tools but as components aimed at developers.
+Documentation and UX are aimed at developers, not end users.
 
 ## Audience
 
-Moby is recommended for anyone who wants to assemble a container-based system. This includes:
+The Moby Project is intended for engineers, integrators and enthusiasts looking to modify, hack, fix, experiment, invent and build systems based on containers.
+It is not for people looking for a commercially supported system, but for people who want to work and learn with open source code.
 
-- Hackers who want to customize or patch their Docker build
-- System engineers or integrators building a container system
-- Infrastructure providers looking to adapt existing container systems to their environment
-- Container enthusiasts who want to experiment with the latest container tech
-- Open-source developers looking to test their project in a variety of different systems
-- Anyone curious about Docker internals and how it’s built
+## Relationship with Docker
 
-Moby is NOT recommended for:
+The components and tools in the Moby Project are initially the open source components that Docker and the community have built for the Docker Project.
+New projects can be added if they fit with the community goals. Docker is committed to using Moby as the upstream for the Docker Product.
+However, other projects are also encouraged to use Moby as an upstream, and to reuse the components in diverse ways, and all these uses will be treated in the same way. External maintainers and contributors are welcomed.
 
-- Application developers looking for an easy way to run their applications in containers. We recommend Docker CE instead.
-- Enterprise IT and development teams looking for a ready-to-use, commercially supported container platform. We recommend Docker EE instead.
-- Anyone curious about containers and looking for an easy way to learn. We recommend the [docker.com](https://www.docker.com/) website instead.
-
-# Transitioning to Moby
-
-Docker is transitioning all of its open source collaborations to the Moby project going forward.
-During the transition, all open source activity should continue as usual.
-
-We are proposing the following list of changes:
-
-- splitting up the engine into more open components
-- removing the docker UI, SDK etc to keep them in the Docker org
-- clarifying that the project is not limited to the engine, but to the assembly of all the individual components of the Docker platform
-- open-source new tools & components which we currently use to assemble the Docker product, but could benefit the community
-- defining an open, community-centric governance inspired by the Fedora project (a very successful example of balancing the needs of the community with the constraints of the primary corporate sponsor)
+The Moby project is not intended as a location for support or feature requests for Docker products, but as a place for contributors to work on open source code, fix bugs, and make the code more useful.
+The releases are supported by the maintainers, community and users, on a best efforts basis only, and are not intended for customers who want enterprise or commercial support; Docker EE is the appropriate product for these use cases.
 
 -----
 
@@ -82,7 +50,6 @@
 
 For more information, please see https://www.bis.doc.gov
 
-
 Licensing
 =========
 Moby is licensed under the Apache License, Version 2.0. See
diff --git a/ROADMAP.md b/ROADMAP.md
index 05a8695..e2e6b2b 100644
--- a/ROADMAP.md
+++ b/ROADMAP.md
@@ -1,118 +1,68 @@
-Docker Engine Roadmap
-=====================
+Moby Project Roadmap
+====================
 
 ### How should I use this document?
 
 This document provides description of items that the project decided to prioritize. This should
-serve as a reference point for Docker contributors to understand where the project is going, and
-help determine if a contribution could be conflicting with some longer terms plans.
+serve as a reference point for Moby contributors to understand where the project is going, and
+help determine if a contribution could be conflicting with some longer term plans.
 
 The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be
-refused (except for those mentioned as "frozen features" below)! We are always happy to receive
-patches for new cool features we haven't thought about, or didn't judge priority. Please however
-understand that such patches might take longer for us to review.
+refused! We are always happy to receive patches for new cool features we haven't thought about,
+or didn't judge to be a priority. Please however understand that such patches might take longer
+for us to review.
 
 ### How can I help?
 
-Short term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described
-in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our
+Short term objectives are listed in
+[Issues](https://github.com/moby/moby/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our
 goal is to split down the workload in such way that anybody can jump in and help. Please comment on
-issues if you want to take it to avoid duplicating effort! Similarly, if a maintainer is already
-assigned on an issue you'd like to participate in, pinging him on IRC or GitHub to offer your help is
+issues if you want to work on one to avoid duplicating effort! Similarly, if a maintainer is already
+assigned to an issue you'd like to participate in, pinging them on GitHub to offer your help is
 the best way to go.
 
 ### How can I add something to the roadmap?
 
-The roadmap process is new to the Docker Engine: we are only beginning to structure and document the
+The roadmap process is new to the Moby Project: we are only beginning to structure and document the
 project objectives. Our immediate goal is to be more transparent, and work with our community to
 focus our efforts on fewer prioritized topics.
 
 We hope to offer in the near future a process allowing anyone to propose a topic to the roadmap, but
-we are not quite there yet. For the time being, the BDFL remains the keeper of the roadmap, and we
-won't be accepting pull requests adding or removing items from this file.
+we are not quite there yet. For the time being, it is best to discuss with the maintainers on an
+issue, in the Slack channel, or in person at the Moby Summits that happen every few months.
 
 # 1. Features and refactoring
 
 ## 1.1 Runtime improvements
 
-We recently introduced [`runC`](https://runc.io) as a standalone low-level tool for container
-execution. The initial goal was to integrate runC as a replacement in the Engine for the traditional
-default libcontainer `execdriver`, but the Engine internals were not ready for this.
+We introduced [`runC`](https://runc.io) as a standalone low-level tool for container
+execution in 2015, the first stage in spinning out parts of the Engine into standalone tools.
 
 As runC continued evolving, and the OCI specification along with it, we created
-[`containerd`](https://containerd.tools/), a daemon to control and monitor multiple `runC`. This is
-the new target for Engine integration, as it can entirely replace the whole `execdriver`
-architecture, and container monitoring along with it.
+[`containerd`](https://github.com/containerd/containerd), a daemon to control and monitor `runC`.
+In late 2016 this was relaunched as the `containerd` 1.0 track, aiming to provide a common runtime
+for the whole spectrum of container systems, including Kubernetes, with wide community support.
+This change meant that there was an increased scope for `containerd`, including image management
+and storage drivers.
 
-Docker Engine will rely on a long-running `containerd` companion daemon for all container execution
+Moby will rely on a long-running `containerd` companion daemon for all container execution
 related operations. This could open the door in the future for Engine restarts without interrupting
-running containers.
+running containers. The switch over to containerd 1.0 is an important goal for the project, and
+will result in a significant simplification of the functions implemented in this repository.
 
-## 1.2 Plugins improvements
+## 1.2 Internal decoupling
 
-Docker Engine 1.7.0 introduced plugin support, initially for the use cases of volumes and networks
-extensions. The plugin infrastructure was kept minimal as we were collecting use cases and real
-world feedback before optimizing for any particular workflow.
+A lot of work has been done in trying to decouple Moby internals. This process of creating
+standalone projects with a well-defined function that attract a dedicated community should continue.
+As well as integrating `containerd` we would like to integrate [BuildKit](https://github.com/moby/buildkit)
+as the next standalone component.
 
-In the future, we'd like plugins to become first class citizens, and encourage an ecosystem of
-plugins. This implies in particular making it trivially easy to distribute plugins as containers
-through any Registry instance, as well as solving the commonly heard pain points of plugins needing
-to be treated as somewhat special (being active at all time, started before any other user
-containers, and not as easily dismissed).
+We see gRPC as the natural communication layer between decoupled components.
 
-## 1.3 Internal decoupling
+## 1.3 Custom assembly tooling
 
-A lot of work has been done in trying to decouple the Docker Engine's internals. In particular, the
-API implementation has been refactored, and the Builder side of the daemon is now
-[fully independent](https://github.com/docker/docker/tree/master/builder) while still residing in
-the same repository.
-
-We are exploring ways to go further with that decoupling, capitalizing on the work introduced by the
-runtime renovation and plugins improvement efforts. Indeed, the combination of `containerd` support
-with the concept of "special" containers opens the door for bootstrapping more Engine internals
-using the same facilities.
-
-## 1.4 Cluster capable Engine
-
-The community has been pushing for a more cluster capable Docker Engine, and a huge effort was spent
-adding features such as multihost networking, and node discovery down at the Engine level. Yet, the
-Engine is currently incapable of taking scheduling decisions alone, and continues relying on Swarm
-for that.
-
-We plan to complete this effort and make Engine fully cluster capable. Multiple instances of the
-Docker Engine being already capable of discovering each other and establish overlay networking for
-their container to communicate, the next step is for a given Engine to gain ability to dispatch work
-to another node in the cluster. This will be introduced in a backward compatible way, such that a
-`docker run` invocation on a particular node remains fully deterministic.
-
-# 2. Frozen features
-
-## 2.1 Docker exec
-
-We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a
-*debugging* feature, as well as being strongly dependent on the Runtime ingredient effort.
-
-## 2.2 Remote Registry Operations
-
-A large amount of work is ongoing in the area of image distribution and provenance. This includes
-moving to the V2 Registry API and heavily refactoring the code that powers these features. The
-desired result is more secure, reliable and easier to use image distribution.
-
-Part of the problem with this part of the code base is the lack of a stable and flexible interface.
-If new features are added that access the registry without solidifying these interfaces, achieving
-feature parity will continue to be elusive. While we get a handle on this situation, we are imposing
-a moratorium on new code that accesses the Registry API in commands that don't already make remote
-calls.
-
-Currently, only the following commands cause interaction with a remote registry:
-
-  - push
-  - pull
-  - run
-  - build
-  - search
-  - login
-
-In the interest of stabilizing the registry access model during this ongoing work, we are not
-accepting additions to other commands that will cause remote interaction with the Registry API. This
-moratorium will lift when the goals of the distribution project have been met.
+We have been prototyping the Moby [assembly tool](https://github.com/moby/tool) which was originally
+developed for LinuxKit and intend to turn it into a more generic packaging and assembly mechanism
+that can build not only the default version of Moby, as distribution packages or other useful forms,
+but can also build very different container systems, themselves built of cooperating daemons built in
+and running in containers. We intend to merge this functionality into this repo.
diff --git a/VERSION b/VERSION
deleted file mode 100644
index 2d736aa..0000000
--- a/VERSION
+++ /dev/null
@@ -1 +0,0 @@
-17.06.0-dev
diff --git a/api/README.md b/api/README.md
index bb88132..f136c34 100644
--- a/api/README.md
+++ b/api/README.md
@@ -10,7 +10,7 @@
 - `client/` The Go client used by the command-line client. It can also be used by third-party Go programs.
 - `daemon/` The daemon, which serves the API.
 
-## Swagger definition
+## Swagger definition
 
 The API is defined by the [Swagger](http://swagger.io/specification/) definition in `api/swagger.yaml`. This definition can be used to:
 
@@ -20,7 +20,7 @@
 
 ## Updating the API documentation
 
-The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, you'll need to edit this file to represent the change in the documentation.
+The API documentation is generated entirely from `api/swagger.yaml`. If you make updates to the API, edit this file to represent the change in the documentation.
 
 The file is split into two main sections:
 
@@ -29,9 +29,9 @@
 
 To make an edit, first look for the endpoint you want to edit under `paths`, then make the required edits. Endpoints may reference reusable objects with `$ref`, which can be found in the `definitions` section.
 
-There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919)
+There is hopefully enough example material in the file for you to copy a similar pattern from elsewhere in the file (e.g. adding new fields or endpoints), but for the full reference, see the [Swagger specification](https://github.com/docker/docker/issues/27919).
 
-`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful for when you are making edits to ensure you are doing the right thing.
+`swagger.yaml` is validated by `hack/validate/swagger` to ensure it is a valid Swagger definition. This is useful when making edits to ensure you are doing the right thing.
 
 ## Viewing the API documentation
 
diff --git a/api/common.go b/api/common.go
index 6e462ae..af34d0b 100644
--- a/api/common.go
+++ b/api/common.go
@@ -1,65 +1,11 @@
 package api
 
-import (
-	"encoding/json"
-	"encoding/pem"
-	"fmt"
-	"os"
-	"path/filepath"
-
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/system"
-	"github.com/docker/libtrust"
-)
-
 // Common constants for daemon and client.
 const (
 	// DefaultVersion of Current REST API
-	DefaultVersion string = "1.32"
+	DefaultVersion string = "1.35"
 
 	// NoBaseImageSpecifier is the symbol used by the FROM
 	// command to specify that no base image is to be used.
 	NoBaseImageSpecifier string = "scratch"
 )
-
-// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
-// otherwise generates a new one
-func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
-	err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "")
-	if err != nil {
-		return nil, err
-	}
-	trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
-	if err == libtrust.ErrKeyFileDoesNotExist {
-		trustKey, err = libtrust.GenerateECP256PrivateKey()
-		if err != nil {
-			return nil, fmt.Errorf("Error generating key: %s", err)
-		}
-		encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
-		if err != nil {
-			return nil, fmt.Errorf("Error serializing key: %s", err)
-		}
-		if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
-			return nil, fmt.Errorf("Error saving key file: %s", err)
-		}
-	} else if err != nil {
-		return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
-	}
-	return trustKey, nil
-}
-
-func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
-	if ext == ".json" || ext == ".jwk" {
-		encoded, err = json.Marshal(key)
-		if err != nil {
-			return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
-		}
-	} else {
-		pemBlock, err := key.PEMBlock()
-		if err != nil {
-			return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
-		}
-		encoded = pem.EncodeToMemory(pemBlock)
-	}
-	return
-}
diff --git a/api/common_test.go b/api/common_test.go
deleted file mode 100644
index f466616..0000000
--- a/api/common_test.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package api
-
-import (
-	"io/ioutil"
-	"path/filepath"
-	"testing"
-
-	"os"
-)
-
-// LoadOrCreateTrustKey
-func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) {
-	tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmpKeyFolderPath)
-
-	tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil {
-		t.Fatal("expected an error, got nothing.")
-	}
-
-}
-
-func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) {
-	tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmpKeyFolderPath)
-
-	// Without the need to create the folder hierarchy
-	tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile")
-
-	if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
-		t.Fatalf("expected a new key file, got : %v and %v", err, key)
-	}
-
-	if _, err := os.Stat(tmpKeyFile); err != nil {
-		t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err)
-	}
-
-	// With the need to create the folder hierarchy as tmpKeyFie is in a path
-	// where some folders do not exist.
-	tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile")
-
-	if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
-		t.Fatalf("expected a new key file, got : %v and %v", err, key)
-	}
-
-	if _, err := os.Stat(tmpKeyFile); err != nil {
-		t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err)
-	}
-
-	// With no path at all
-	defer os.Remove("keyfile")
-	if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil {
-		t.Fatalf("expected a new key file, got : %v and %v", err, key)
-	}
-
-	if _, err := os.Stat("keyfile"); err != nil {
-		t.Fatalf("Expected to find a file keyfile, got %v", err)
-	}
-}
-
-func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) {
-	tmpKeyFile := filepath.Join("fixtures", "keyfile")
-
-	if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil {
-		t.Fatalf("expected a key file, got : %v and %v", err, key)
-	}
-}
diff --git a/api/errdefs/is.go b/api/errdefs/is.go
index ce574cd..b0be0b8 100644
--- a/api/errdefs/is.go
+++ b/api/errdefs/is.go
@@ -25,7 +25,7 @@
 	}
 }
 
-// IsNotFound returns if the passed in error is a ErrNotFound
+// IsNotFound returns if the passed in error is an ErrNotFound
 func IsNotFound(err error) bool {
 	_, ok := getImplementer(err).(ErrNotFound)
 	return ok
@@ -37,7 +37,7 @@
 	return ok
 }
 
-// IsConflict returns if the passed in error is a ErrConflict
+// IsConflict returns if the passed in error is an ErrConflict
 func IsConflict(err error) bool {
 	_, ok := getImplementer(err).(ErrConflict)
 	return ok
@@ -55,13 +55,13 @@
 	return ok
 }
 
-// IsForbidden returns if the passed in error is a ErrForbidden
+// IsForbidden returns if the passed in error is an ErrForbidden
 func IsForbidden(err error) bool {
 	_, ok := getImplementer(err).(ErrForbidden)
 	return ok
 }
 
-// IsSystem returns if the passed in error is a ErrSystem
+// IsSystem returns if the passed in error is an ErrSystem
 func IsSystem(err error) bool {
 	_, ok := getImplementer(err).(ErrSystem)
 	return ok
@@ -73,7 +73,7 @@
 	return ok
 }
 
-// IsNotImplemented returns if the passed in error is a ErrNotImplemented
+// IsNotImplemented returns if the passed in error is an ErrNotImplemented
 func IsNotImplemented(err error) bool {
 	_, ok := getImplementer(err).(ErrNotImplemented)
 	return ok
diff --git a/api/names.go b/api/names.go
deleted file mode 100644
index f147d1f..0000000
--- a/api/names.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package api
-
-import "regexp"
-
-// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names.
-const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
-
-// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters.
-var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`)
diff --git a/api/server/httputils/form.go b/api/server/httputils/form.go
index a5f6228..1ce822e 100644
--- a/api/server/httputils/form.go
+++ b/api/server/httputils/form.go
@@ -2,7 +2,6 @@
 
 import (
 	"net/http"
-	"path/filepath"
 	"strconv"
 	"strings"
 )
@@ -69,8 +68,7 @@
 	if name == "" {
 		return ArchiveOptions{}, badParameterError{"name"}
 	}
-
-	path := filepath.FromSlash(r.Form.Get("path"))
+	path := r.Form.Get("path")
 	if path == "" {
 		return ArchiveOptions{}, badParameterError{"path"}
 	}
diff --git a/api/server/httputils/write_log_stream.go b/api/server/httputils/write_log_stream.go
index fd024e1..d7a49a1 100644
--- a/api/server/httputils/write_log_stream.go
+++ b/api/server/httputils/write_log_stream.go
@@ -11,26 +11,19 @@
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/stdcopy"
 )
 
 // WriteLogStream writes an encoded byte stream of log messages from the
 // messages channel, multiplexing them with a stdcopy.Writer if mux is true
-func WriteLogStream(ctx context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) {
+func WriteLogStream(_ context.Context, w io.Writer, msgs <-chan *backend.LogMessage, config *types.ContainerLogsOptions, mux bool) {
 	wf := ioutils.NewWriteFlusher(w)
 	defer wf.Close()
 
 	wf.Flush()
 
-	// this might seem like doing below is clear:
-	//   var outStream io.Writer = wf
-	// however, this GREATLY DISPLEASES golint, and if you do that, it will
-	// fail CI. we need outstream to be type writer because if we mux streams,
-	// we will need to reassign all of the streams to be stdwriters, which only
-	// conforms to the io.Writer interface.
-	var outStream io.Writer
-	outStream = wf
+	outStream := io.Writer(wf)
 	errStream := outStream
 	sysErrStream := errStream
 	if mux {
@@ -56,11 +49,7 @@
 			logLine = append(logLine, msg.Line...)
 		}
 		if config.Timestamps {
-			// TODO(dperny) the format is defined in
-			// daemon/logger/logger.go as logger.TimeFormat. importing
-			// logger is verboten (not part of backend) so idk if just
-			// importing the same thing from jsonlog is good enough
-			logLine = append([]byte(msg.Timestamp.Format(jsonlog.RFC3339NanoFixed)+" "), logLine...)
+			logLine = append([]byte(msg.Timestamp.Format(jsonmessage.RFC3339NanoFixed)+" "), logLine...)
 		}
 		if msg.Source == "stdout" && config.ShowStdout {
 			outStream.Write(logLine)
diff --git a/api/server/middleware/version.go b/api/server/middleware/version.go
index ef35c72..6795529 100644
--- a/api/server/middleware/version.go
+++ b/api/server/middleware/version.go
@@ -28,11 +28,14 @@
 }
 
 type versionUnsupportedError struct {
-	version, minVersion string
+	version, minVersion, maxVersion string
 }
 
 func (e versionUnsupportedError) Error() string {
-	return fmt.Sprintf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", e.version, e.minVersion)
+	if e.minVersion != "" {
+		return fmt.Sprintf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", e.version, e.minVersion)
+	}
+	return fmt.Sprintf("client version %s is too new. Maximum supported API version is %s", e.version, e.maxVersion)
 }
 
 func (e versionUnsupportedError) InvalidParameter() {}
@@ -40,19 +43,20 @@
 // WrapHandler returns a new handler function wrapping the previous one in the request chain.
 func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+		w.Header().Set("Server", fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS))
+		w.Header().Set("API-Version", v.defaultVersion)
+		w.Header().Set("OSType", runtime.GOOS)
+
 		apiVersion := vars["version"]
 		if apiVersion == "" {
 			apiVersion = v.defaultVersion
 		}
-
 		if versions.LessThan(apiVersion, v.minVersion) {
-			return versionUnsupportedError{apiVersion, v.minVersion}
+			return versionUnsupportedError{version: apiVersion, minVersion: v.minVersion}
 		}
-
-		header := fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS)
-		w.Header().Set("Server", header)
-		w.Header().Set("API-Version", v.defaultVersion)
-		w.Header().Set("OSType", runtime.GOOS)
+		if versions.GreaterThan(apiVersion, v.defaultVersion) {
+			return versionUnsupportedError{version: apiVersion, maxVersion: v.defaultVersion}
+		}
 		// nolint: golint
 		ctx = context.WithValue(ctx, "api-version", apiVersion)
 		return handler(ctx, w, r, vars)
diff --git a/api/server/middleware/version_test.go b/api/server/middleware/version_test.go
index 29787bf..9cf7cc2 100644
--- a/api/server/middleware/version_test.go
+++ b/api/server/middleware/version_test.go
@@ -3,10 +3,12 @@
 import (
 	"net/http"
 	"net/http/httptest"
+	"runtime"
 	"strings"
 	"testing"
 
 	"github.com/docker/docker/api/server/httputils"
+	"github.com/stretchr/testify/assert"
 	"golang.org/x/net/context"
 )
 
@@ -31,7 +33,7 @@
 	}
 }
 
-func TestVersionMiddlewareWithErrors(t *testing.T) {
+func TestVersionMiddlewareVersionTooOld(t *testing.T) {
 	handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 		if httputils.VersionFromContext(ctx) == "" {
 			t.Fatal("Expected version, got empty string")
@@ -55,3 +57,56 @@
 		t.Fatalf("Expected too old client error, got %v", err)
 	}
 }
+
+func TestVersionMiddlewareVersionTooNew(t *testing.T) {
+	handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+		if httputils.VersionFromContext(ctx) == "" {
+			t.Fatal("Expected version, got empty string")
+		}
+		return nil
+	}
+
+	defaultVersion := "1.10.0"
+	minVersion := "1.2.0"
+	m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
+	h := m.WrapHandler(handler)
+
+	req, _ := http.NewRequest("GET", "/containers/json", nil)
+	resp := httptest.NewRecorder()
+	ctx := context.Background()
+
+	vars := map[string]string{"version": "9999.9999"}
+	err := h(ctx, resp, req, vars)
+
+	if !strings.Contains(err.Error(), "client version 9999.9999 is too new. Maximum supported API version is 1.10.0") {
+		t.Fatalf("Expected too new client error, got %v", err)
+	}
+}
+
+func TestVersionMiddlewareWithErrorsReturnsHeaders(t *testing.T) {
+	handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+		if httputils.VersionFromContext(ctx) == "" {
+			t.Fatal("Expected version, got empty string")
+		}
+		return nil
+	}
+
+	defaultVersion := "1.10.0"
+	minVersion := "1.2.0"
+	m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion)
+	h := m.WrapHandler(handler)
+
+	req, _ := http.NewRequest("GET", "/containers/json", nil)
+	resp := httptest.NewRecorder()
+	ctx := context.Background()
+
+	vars := map[string]string{"version": "0.1"}
+	err := h(ctx, resp, req, vars)
+
+	assert.Error(t, err)
+	hdr := resp.Result().Header
+	assert.Contains(t, hdr.Get("Server"), "Docker/"+defaultVersion)
+	assert.Contains(t, hdr.Get("Server"), runtime.GOOS)
+	assert.Equal(t, hdr.Get("API-Version"), defaultVersion)
+	assert.Equal(t, hdr.Get("OSType"), runtime.GOOS)
+}
diff --git a/api/server/router/build/build_routes.go b/api/server/router/build/build_routes.go
index 2c9a947..fefb875 100644
--- a/api/server/router/build/build_routes.go
+++ b/api/server/router/build/build_routes.go
@@ -7,6 +7,7 @@
 	"fmt"
 	"io"
 	"net/http"
+	"os"
 	"runtime"
 	"strconv"
 	"strings"
@@ -20,6 +21,7 @@
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/pkg/streamformatter"
+	"github.com/docker/docker/pkg/system"
 	units "github.com/docker/go-units"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -67,6 +69,24 @@
 	options.Squash = httputils.BoolValue(r, "squash")
 	options.Target = r.FormValue("target")
 	options.RemoteContext = r.FormValue("remote")
+	if versions.GreaterThanOrEqualTo(version, "1.32") {
+		// TODO @jhowardmsft. The following environment variable is an interim
+		// measure to allow the daemon to have a default platform if omitted by
+		// the client. This allows LCOW and WCOW to work with a down-level CLI
+		// for a short period of time, as the CLI changes can't be merged
+		// until after the daemon changes have been merged. Once the CLI is
+		// updated, this can be removed. PR for CLI is currently in
+		// https://github.com/docker/cli/pull/474.
+		apiPlatform := r.FormValue("platform")
+		if system.LCOWSupported() && apiPlatform == "" {
+			apiPlatform = os.Getenv("LCOW_API_PLATFORM_IF_OMITTED")
+		}
+		p := system.ParsePlatform(apiPlatform)
+		if err := system.ValidatePlatform(p); err != nil {
+			return nil, validationError{fmt.Errorf("invalid platform: %s", err)}
+		}
+		options.Platform = p.OS
+	}
 
 	if r.Form.Get("shmsize") != "" {
 		shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64)
diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go
index 30fd3a1..d845fdd 100644
--- a/api/server/router/container/container_routes.go
+++ b/api/server/router/container/container_routes.go
@@ -28,7 +28,7 @@
 	if err := httputils.ParseForm(r); err != nil {
 		return err
 	}
-	filter, err := filters.FromParam(r.Form.Get("filters"))
+	filter, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
@@ -96,6 +96,7 @@
 		Follow:     httputils.BoolValue(r, "follow"),
 		Timestamps: httputils.BoolValue(r, "timestamps"),
 		Since:      r.Form.Get("since"),
+		Until:      r.Form.Get("until"),
 		Tail:       r.Form.Get("tail"),
 		ShowStdout: stdout,
 		ShowStderr: stderr,
@@ -280,11 +281,12 @@
 	// Behavior changed in version 1.30 to handle wait condition and to
 	// return headers immediately.
 	version := httputils.VersionFromContext(ctx)
-	legacyBehavior := versions.LessThan(version, "1.30")
+	legacyBehaviorPre130 := versions.LessThan(version, "1.30")
+	legacyRemovalWaitPre134 := false
 
 	// The wait condition defaults to "not-running".
 	waitCondition := containerpkg.WaitConditionNotRunning
-	if !legacyBehavior {
+	if !legacyBehaviorPre130 {
 		if err := httputils.ParseForm(r); err != nil {
 			return err
 		}
@@ -293,6 +295,7 @@
 			waitCondition = containerpkg.WaitConditionNextExit
 		case container.WaitConditionRemoved:
 			waitCondition = containerpkg.WaitConditionRemoved
+			legacyRemovalWaitPre134 = versions.LessThan(version, "1.34")
 		}
 	}
 
@@ -306,7 +309,7 @@
 
 	w.Header().Set("Content-Type", "application/json")
 
-	if !legacyBehavior {
+	if !legacyBehaviorPre130 {
 		// Write response header immediately.
 		w.WriteHeader(http.StatusOK)
 		if flusher, ok := w.(http.Flusher); ok {
@@ -317,8 +320,22 @@
 	// Block on the result of the wait operation.
 	status := <-waitC
 
+	// With API < 1.34, wait on WaitConditionRemoved did not return
+	// in case container removal failed. The only way to report an
+	// error back to the client is to not write anything (i.e. send
+	// an empty response which will be treated as an error).
+	if legacyRemovalWaitPre134 && status.Err() != nil {
+		return nil
+	}
+
+	var waitError *container.ContainerWaitOKBodyError
+	if status.Err() != nil {
+		waitError = &container.ContainerWaitOKBodyError{Message: status.Err().Error()}
+	}
+
 	return json.NewEncoder(w).Encode(&container.ContainerWaitOKBody{
 		StatusCode: int64(status.ExitCode()),
+		Error:      waitError,
 	})
 }
 
@@ -588,7 +605,7 @@
 		return err
 	}
 
-	pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
+	pruneFilters, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return validationError{err}
 	}
diff --git a/api/server/router/container/exec.go b/api/server/router/container/exec.go
index aa2ebb1..97c27d8 100644
--- a/api/server/router/container/exec.go
+++ b/api/server/router/container/exec.go
@@ -126,7 +126,7 @@
 			return err
 		}
 		stdout.Write([]byte(err.Error() + "\r\n"))
-		logrus.Errorf("Error running exec in container: %v", err)
+		logrus.Errorf("Error running exec %s in container: %v", execName, err)
 	}
 	return nil
 }
diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go
index 86d73df..dabab3b 100644
--- a/api/server/router/image/image_routes.go
+++ b/api/server/router/image/image_routes.go
@@ -3,9 +3,10 @@
 import (
 	"encoding/base64"
 	"encoding/json"
+	"fmt"
 	"io"
 	"net/http"
-	"runtime"
+	"os"
 	"strconv"
 	"strings"
 
@@ -19,6 +20,7 @@
 	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/registry"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
@@ -71,83 +73,70 @@
 
 // Creates an image from Pull or from Import
 func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
+
 	if err := httputils.ParseForm(r); err != nil {
 		return err
 	}
 
 	var (
-		image   = r.Form.Get("fromImage")
-		repo    = r.Form.Get("repo")
-		tag     = r.Form.Get("tag")
-		message = r.Form.Get("message")
-		err     error
-		output  = ioutils.NewWriteFlusher(w)
+		image    = r.Form.Get("fromImage")
+		repo     = r.Form.Get("repo")
+		tag      = r.Form.Get("tag")
+		message  = r.Form.Get("message")
+		err      error
+		output   = ioutils.NewWriteFlusher(w)
+		platform = &specs.Platform{}
 	)
 	defer output.Close()
 
-	// TODO @jhowardmsft LCOW Support: Eventually we will need an API change
-	// so that platform comes from (for example) r.Form.Get("platform"). For
-	// the initial implementation, we assume that the platform is the
-	// runtime OS of the host. It will also need a validation function such
-	// as below which should be called after getting it from the API.
-	//
-	// Ensures the requested platform is valid and normalized
-	//func validatePlatform(req string) (string, error) {
-	//	req = strings.ToLower(req)
-	//	if req == "" {
-	//		req = runtime.GOOS // default to host platform
-	//	}
-	//	valid := []string{runtime.GOOS}
-	//
-	//	if system.LCOWSupported() {
-	//		valid = append(valid, "linux")
-	//	}
-	//
-	//	for _, item := range valid {
-	//		if req == item {
-	//			return req, nil
-	//		}
-	//	}
-	//	return "", fmt.Errorf("invalid platform requested: %s", req)
-	//}
-	//
-	// And in the call-site:
-	//	if platform, err = validatePlatform(platform); err != nil {
-	//		return err
-	//	}
-	platform := runtime.GOOS
-	if system.LCOWSupported() {
-		platform = "linux"
-	}
-
 	w.Header().Set("Content-Type", "application/json")
 
-	if image != "" { //pull
-		metaHeaders := map[string][]string{}
-		for k, v := range r.Header {
-			if strings.HasPrefix(k, "X-Meta-") {
-				metaHeaders[k] = v
-			}
+	version := httputils.VersionFromContext(ctx)
+	if versions.GreaterThanOrEqualTo(version, "1.32") {
+		// TODO @jhowardmsft. The following environment variable is an interim
+		// measure to allow the daemon to have a default platform if omitted by
+		// the client. This allows LCOW and WCOW to work with a down-level CLI
+		// for a short period of time, as the CLI changes can't be merged
+		// until after the daemon changes have been merged. Once the CLI is
+		// updated, this can be removed. PR for CLI is currently in
+		// https://github.com/docker/cli/pull/474.
+		apiPlatform := r.FormValue("platform")
+		if system.LCOWSupported() && apiPlatform == "" {
+			apiPlatform = os.Getenv("LCOW_API_PLATFORM_IF_OMITTED")
 		}
-
-		authEncoded := r.Header.Get("X-Registry-Auth")
-		authConfig := &types.AuthConfig{}
-		if authEncoded != "" {
-			authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
-			if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
-				// for a pull it is not an error if no auth was given
-				// to increase compatibility with the existing api it is defaulting to be empty
-				authConfig = &types.AuthConfig{}
-			}
+		platform = system.ParsePlatform(apiPlatform)
+		if err = system.ValidatePlatform(platform); err != nil {
+			err = fmt.Errorf("invalid platform: %s", err)
 		}
+	}
 
-		err = s.backend.PullImage(ctx, image, tag, platform, metaHeaders, authConfig, output)
-	} else { //import
-		src := r.Form.Get("fromSrc")
-		// 'err' MUST NOT be defined within this block, we need any error
-		// generated from the download to be available to the output
-		// stream processing below
-		err = s.backend.ImportImage(src, repo, platform, tag, message, r.Body, output, r.Form["changes"])
+	if err == nil {
+		if image != "" { //pull
+			metaHeaders := map[string][]string{}
+			for k, v := range r.Header {
+				if strings.HasPrefix(k, "X-Meta-") {
+					metaHeaders[k] = v
+				}
+			}
+
+			authEncoded := r.Header.Get("X-Registry-Auth")
+			authConfig := &types.AuthConfig{}
+			if authEncoded != "" {
+				authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded))
+				if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil {
+					// for a pull it is not an error if no auth was given
+					// to increase compatibility with the existing api it is defaulting to be empty
+					authConfig = &types.AuthConfig{}
+				}
+			}
+			err = s.backend.PullImage(ctx, image, tag, platform.OS, metaHeaders, authConfig, output)
+		} else { //import
+			src := r.Form.Get("fromSrc")
+			// 'err' MUST NOT be defined within this block, we need any error
+			// generated from the download to be available to the output
+			// stream processing below
+			err = s.backend.ImportImage(src, repo, platform.OS, tag, message, r.Body, output, r.Form["changes"])
+		}
 	}
 	if err != nil {
 		if !output.Flushed() {
@@ -302,7 +291,7 @@
 		return err
 	}
 
-	imageFilters, err := filters.FromParam(r.Form.Get("filters"))
+	imageFilters, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
@@ -385,7 +374,7 @@
 		return err
 	}
 
-	pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
+	pruneFilters, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
diff --git a/api/server/router/network/filter.go b/api/server/router/network/filter.go
index 21fc828..6a5fce6 100644
--- a/api/server/router/network/filter.go
+++ b/api/server/router/network/filter.go
@@ -45,27 +45,27 @@
 
 	displayNet := []types.NetworkResource{}
 	for _, nw := range nws {
-		if filter.Include("driver") {
+		if filter.Contains("driver") {
 			if !filter.ExactMatch("driver", nw.Driver) {
 				continue
 			}
 		}
-		if filter.Include("name") {
+		if filter.Contains("name") {
 			if !filter.Match("name", nw.Name) {
 				continue
 			}
 		}
-		if filter.Include("id") {
+		if filter.Contains("id") {
 			if !filter.Match("id", nw.ID) {
 				continue
 			}
 		}
-		if filter.Include("label") {
+		if filter.Contains("label") {
 			if !filter.MatchKVList("label", nw.Labels) {
 				continue
 			}
 		}
-		if filter.Include("scope") {
+		if filter.Contains("scope") {
 			if !filter.ExactMatch("scope", nw.Scope) {
 				continue
 			}
@@ -73,7 +73,7 @@
 		displayNet = append(displayNet, nw)
 	}
 
-	if filter.Include("type") {
+	if filter.Contains("type") {
 		typeNet := []types.NetworkResource{}
 		errFilter := filter.WalkValues("type", func(fval string) error {
 			passList, err := filterNetworkByType(displayNet, fval)
diff --git a/api/server/router/network/network_routes.go b/api/server/router/network/network_routes.go
index ad3e74e..a67bb9f 100644
--- a/api/server/router/network/network_routes.go
+++ b/api/server/router/network/network_routes.go
@@ -37,7 +37,7 @@
 	}
 
 	filter := r.Form.Get("filters")
-	netFilters, err := filters.FromParam(filter)
+	netFilters, err := filters.FromJSON(filter)
 	if err != nil {
 		return err
 	}
@@ -100,6 +100,24 @@
 
 func (ambigousResultsError) InvalidParameter() {}
 
+type conflictError struct {
+	cause error
+}
+
+func (e conflictError) Error() string {
+	return e.cause.Error()
+}
+
+func (e conflictError) Cause() error {
+	return e.cause
+}
+
+func (e conflictError) Conflict() {}
+
+func nameConflict(name string) error {
+	return conflictError{libnetwork.NetworkNameError(name)}
+}
+
 func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := httputils.ParseForm(r); err != nil {
 		return err
@@ -225,7 +243,7 @@
 	}
 
 	if nws, err := n.cluster.GetNetworksByName(create.Name); err == nil && len(nws) > 0 {
-		return libnetwork.NetworkNameError(create.Name)
+		return nameConflict(create.Name)
 	}
 
 	nw, err := n.backend.CreateNetwork(create)
@@ -235,7 +253,7 @@
 			// check if user defined CheckDuplicate, if set true, return err
 			// otherwise prepare a warning message
 			if create.CheckDuplicate {
-				return libnetwork.NetworkNameError(create.Name)
+				return nameConflict(create.Name)
 			}
 			warning = libnetwork.NetworkNameError(create.Name).Error()
 		}
@@ -489,7 +507,7 @@
 		return err
 	}
 
-	pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
+	pruneFilters, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
diff --git a/api/server/router/plugin/plugin_routes.go b/api/server/router/plugin/plugin_routes.go
index 79e3cf5..d74a01f 100644
--- a/api/server/router/plugin/plugin_routes.go
+++ b/api/server/router/plugin/plugin_routes.go
@@ -290,7 +290,7 @@
 		return err
 	}
 
-	pluginFilters, err := filters.FromParam(r.Form.Get("filters"))
+	pluginFilters, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
diff --git a/api/server/router/swarm/cluster_routes.go b/api/server/router/swarm/cluster_routes.go
index 7bd3aff..5b6c19d 100644
--- a/api/server/router/swarm/cluster_routes.go
+++ b/api/server/router/swarm/cluster_routes.go
@@ -151,7 +151,7 @@
 	if err := httputils.ParseForm(r); err != nil {
 		return err
 	}
-	filter, err := filters.FromParam(r.Form.Get("filters"))
+	filter, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return invalidRequestError{err}
 	}
@@ -277,7 +277,7 @@
 	if err := httputils.ParseForm(r); err != nil {
 		return err
 	}
-	filter, err := filters.FromParam(r.Form.Get("filters"))
+	filter, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
@@ -339,7 +339,7 @@
 	if err := httputils.ParseForm(r); err != nil {
 		return err
 	}
-	filter, err := filters.FromParam(r.Form.Get("filters"))
+	filter, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
@@ -367,7 +367,7 @@
 	if err := httputils.ParseForm(r); err != nil {
 		return err
 	}
-	filters, err := filters.FromParam(r.Form.Get("filters"))
+	filters, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
@@ -427,18 +427,14 @@
 	}
 
 	id := vars["id"]
-	if err := sr.backend.UpdateSecret(id, version, secret); err != nil {
-		return err
-	}
-
-	return nil
+	return sr.backend.UpdateSecret(id, version, secret)
 }
 
 func (sr *swarmRouter) getConfigs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
 	if err := httputils.ParseForm(r); err != nil {
 		return err
 	}
-	filters, err := filters.FromParam(r.Form.Get("filters"))
+	filters, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
@@ -498,9 +494,5 @@
 	}
 
 	id := vars["id"]
-	if err := sr.backend.UpdateConfig(id, version, config); err != nil {
-		return err
-	}
-
-	return nil
+	return sr.backend.UpdateConfig(id, version, config)
 }
diff --git a/api/server/router/system/system_routes.go b/api/server/router/system/system_routes.go
index 5884388..8f6aecd 100644
--- a/api/server/router/system/system_routes.go
+++ b/api/server/router/system/system_routes.go
@@ -123,11 +123,11 @@
 
 		if !onlyPastEvents {
 			dur := until.Sub(now)
-			timeout = time.NewTimer(dur).C
+			timeout = time.After(dur)
 		}
 	}
 
-	ef, err := filters.FromParam(r.Form.Get("filters"))
+	ef, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
diff --git a/api/server/router/volume/volume_routes.go b/api/server/router/volume/volume_routes.go
index f0f4901..bfff51a 100644
--- a/api/server/router/volume/volume_routes.go
+++ b/api/server/router/volume/volume_routes.go
@@ -72,7 +72,7 @@
 		return err
 	}
 
-	pruneFilters, err := filters.FromParam(r.Form.Get("filters"))
+	pruneFilters, err := filters.FromJSON(r.Form.Get("filters"))
 	if err != nil {
 		return err
 	}
diff --git a/api/server/server.go b/api/server/server.go
index 7ba8a6c..bf3774b 100644
--- a/api/server/server.go
+++ b/api/server/server.go
@@ -23,7 +23,6 @@
 // Config provides the configuration for the API server
 type Config struct {
 	Logging     bool
-	EnableCors  bool
 	CorsHeaders string
 	Version     string
 	SocketGroup string
diff --git a/api/swagger.yaml b/api/swagger.yaml
index 0c9ee95..18b26bb 100644
--- a/api/swagger.yaml
+++ b/api/swagger.yaml
@@ -19,10 +19,10 @@
 consumes:
   - "application/json"
   - "text/plain"
-basePath: "/v1.32"
+basePath: "/v1.35"
 info:
   title: "Docker Engine API"
-  version: "1.32"
+  version: "1.35"
   x-logo:
     url: "https://docs.docker.com/images/logo-docker-main.png"
   description: |
@@ -42,34 +42,26 @@
 
     # Versioning
 
-    The API is usually changed in each release of Docker, so API calls are versioned to ensure that clients don't break.
+    The API is usually changed in each release, so API calls are versioned to
+    ensure that clients don't break. To lock to a specific version of the API,
+    you prefix the URL with its version, for example, call `/v1.30/info` to use
+    the v1.30 version of the `/info` endpoint. If the API version specified in
+    the URL is not supported by the daemon, an HTTP `400 Bad Request` error message
+    is returned.
 
-    For Docker Engine 17.07, the API version is 1.31. To lock to this version, you prefix the URL with `/v1.31`. For example, calling `/info` is the same as calling `/v1.31/info`.
+    If you omit the version-prefix, the current version of the API (v1.35) is used.
+    For example, calling `/info` is the same as calling `/v1.35/info`. Using the
+    API without a version-prefix is deprecated and will be removed in a future release.
 
-    Engine releases in the near future should support this version of the API, so your client will continue to work even if it is talking to a newer Engine.
+    Engine releases in the near future should support this version of the API,
+    so your client will continue to work even if it is talking to a newer Engine.
 
-    In previous versions of Docker, it was possible to access the API without providing a version. This behaviour is now deprecated will be removed in a future version of Docker.
+    The API uses an open schema model, which means the server may add extra properties
+    to responses. Likewise, the server will ignore any extra query parameters and
+    request body properties. When you write clients, you need to ignore additional
+    properties in responses to ensure they do not break when talking to newer
+    daemons.
 
-    The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons.
-
-    This documentation is for version 1.32 of the API. Use this table to find documentation for previous versions of the API:
-
-    Docker version  | API version | Changes
-    ----------------|-------------|---------
-    17.07.x | [1.31](https://docs.docker.com/engine/api/v1.31/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-31-api-changes)
-    17.06.x | [1.30](https://docs.docker.com/engine/api/v1.30/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-30-api-changes)
-    17.05.x | [1.29](https://docs.docker.com/engine/api/v1.29/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-29-api-changes)
-    17.04.x | [1.28](https://docs.docker.com/engine/api/v1.28/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-28-api-changes)
-    17.03.1 | [1.27](https://docs.docker.com/engine/api/v1.27/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-27-api-changes)
-    1.13.1 & 17.03.0 | [1.26](https://docs.docker.com/engine/api/v1.26/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-26-api-changes)
-    1.13.0 | [1.25](https://docs.docker.com/engine/api/v1.25/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-25-api-changes)
-    1.12.x | [1.24](https://docs.docker.com/engine/api/v1.24/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-24-api-changes)
-    1.11.x | [1.23](https://docs.docker.com/engine/api/v1.23/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-23-api-changes)
-    1.10.x | [1.22](https://docs.docker.com/engine/api/v1.22/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-22-api-changes)
-    1.9.x | [1.21](https://docs.docker.com/engine/api/v1.21/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-21-api-changes)
-    1.8.x | [1.20](https://docs.docker.com/engine/api/v1.20/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-20-api-changes)
-    1.7.x | [1.19](https://docs.docker.com/engine/api/v1.19/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-19-api-changes)
-    1.6.x | [1.18](https://docs.docker.com/engine/api/v1.18/) | [API changes](https://docs.docker.com/engine/api/version-history/#v1-18-api-changes)
 
     # Authentication
 
@@ -144,6 +136,10 @@
     x-displayName: "Secrets"
     description: |
       Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work.
+  - name: "Config"
+    x-displayName: "Configs"
+    description: |
+      Configs are application configurations that can be used by services. Swarm mode must be enabled for these endpoints to work.
   # System things
   - name: "Plugin"
     x-displayName: "Plugins"
@@ -714,7 +710,15 @@
             description: "Gives the container full access to the host."
           PublishAllPorts:
             type: "boolean"
-            description: "Allocates a random host port for all of a container's exposed ports."
+            description: |
+              Allocates an ephemeral host port for all of a container's
+              exposed ports.
+
+              Ports are de-allocated when the container stops and allocated when the container starts.
+              The allocated port might be changed when restarting the container.
+
+              The port is selected from the ephemeral port range that depends on the kernel.
+              For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`.
           ReadonlyRootfs:
             type: "boolean"
             description: "Mount the container's root filesystem as read only."
@@ -2672,7 +2676,13 @@
                     ConfigName is the name of the config that this references, but this is just provided for
                     lookup/display purposes. The config in the reference will be identified by its ID.
                   type: "string"
-
+          Isolation:
+            type: "string"
+            description: "Isolation technology of the containers running the service. (Windows only)"
+            enum:
+              - "default"
+              - "process"
+              - "hyperv"
       Resources:
         description: "Resource requirements which apply to each individual container created as part of the service."
         type: "object"
@@ -3036,6 +3046,24 @@
       PublishedPort:
         description: "The port on the swarm hosts."
         type: "integer"
+      PublishMode:
+        description: |
+          The mode in which port is published.
+
+          <p><br /></p>
+
+          - "ingress" makes the target port accessible on every node,
+            regardless of whether there is a task for the service running on
+            that node or not.
+          - "host" bypasses the routing mesh and publishes the port directly on
+            the swarm node where that service is running.
+
+        type: "string"
+        enum:
+          - "ingress"
+          - "host"
+        default: "ingress"
+        example: "ingress"
 
   EndpointSpec:
     description: "Properties that can be configured to access and load balance a service."
@@ -4929,6 +4957,11 @@
           description: "Only return logs since this time, as a UNIX timestamp"
           type: "integer"
           default: 0
+        - name: "until"
+          in: "query"
+          description: "Only return logs before this time, as a UNIX timestamp"
+          type: "integer"
+          default: 0
         - name: "timestamps"
           in: "query"
           description: "Add timestamps to every log line"
@@ -5697,6 +5730,13 @@
                 description: "Exit code of the container"
                 type: "integer"
                 x-nullable: false
+              Error:
+                description: "container waiting error, if any"
+                type: "object"
+                properties:
+                  Message:
+                    description: "Details of an error"
+                    type: "string"
         404:
           description: "no such container"
           schema:
@@ -6157,6 +6197,11 @@
 
             Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API.
           type: "string"
+        - name: "platform"
+          in: "query"
+          description: "Platform in the format os[/arch[/variant]]"
+          type: "string"
+          default: ""
       responses:
         200:
           description: "no error"
@@ -6238,6 +6283,11 @@
           in: "header"
           description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)"
           type: "string"
+        - name: "platform"
+          in: "query"
+          description: "Platform in the format os[/arch[/variant]]"
+          type: "string"
+          default: ""
       tags: ["Image"]
   /images/{name}/json:
     get:
@@ -6936,16 +6986,20 @@
           description: |
             A JSON encoded value of filters (a `map[string][]string`) to process on the event list. Available filters:
 
+            - `config=<string>` config name or ID
             - `container=<string>` container name or ID
             - `daemon=<string>` daemon name or ID
             - `event=<string>` event type
             - `image=<string>` image name or ID
             - `label=<string>` image or container label
             - `network=<string>` network name or ID
+            - `node=<string>` node ID
             - `plugin`=<string> plugin name or ID
             - `scope`=<string> local or swarm
-            - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service` or `secret`
-            - `volume=<string>` volume name or ID
+            - `secret=<string>` secret name or ID
+            - `service=<string>` service name or ID
+            - `type=<string>` object to filter by, one of `container`, `image`, `volume`, `network`, `daemon`, `plugin`, `node`, `service`, `secret` or `config`
+            - `volume=<string>` volume name
           type: "string"
       tags: ["System"]
   /system/df:
diff --git a/api/types/backend/build.go b/api/types/backend/build.go
index 300d358..8df3b3e 100644
--- a/api/types/backend/build.go
+++ b/api/types/backend/build.go
@@ -40,5 +40,5 @@
 	PullOption PullOption
 	AuthConfig map[string]types.AuthConfig
 	Output     io.Writer
-	Platform   string
+	OS         string
 }
diff --git a/api/types/client.go b/api/types/client.go
index 18a1263..93ca428 100644
--- a/api/types/client.go
+++ b/api/types/client.go
@@ -74,6 +74,7 @@
 	ShowStdout bool
 	ShowStderr bool
 	Since      string
+	Until      string
 	Timestamps bool
 	Follow     bool
 	Tail       string
@@ -179,10 +180,7 @@
 	ExtraHosts  []string // List of extra hosts
 	Target      string
 	SessionID   string
-
-	// TODO @jhowardmsft LCOW Support: This will require extending to include
-	// `Platform string`, but is ommited for now as it's hard-coded temporarily
-	// to avoid API changes.
+	Platform    string
 }
 
 // ImageBuildResponse holds information
@@ -195,7 +193,8 @@
 
 // ImageCreateOptions holds information to create images.
 type ImageCreateOptions struct {
-	RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
+	RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry.
+	Platform     string // Platform is the target platform of the image if it needs to be pulled from the registry.
 }
 
 // ImageImportSource holds source information for ImageImport
@@ -206,9 +205,10 @@
 
 // ImageImportOptions holds information to import images from the client host.
 type ImageImportOptions struct {
-	Tag     string   // Tag is the name to tag this image with. This attribute is deprecated.
-	Message string   // Message is the message to tag the image with
-	Changes []string // Changes are the raw changes to apply to this image
+	Tag      string   // Tag is the name to tag this image with. This attribute is deprecated.
+	Message  string   // Message is the message to tag the image with
+	Changes  []string // Changes are the raw changes to apply to this image
+	Platform string   // Platform is the target platform of the image
 }
 
 // ImageListOptions holds parameters to filter the list of images with.
@@ -229,6 +229,7 @@
 	All           bool
 	RegistryAuth  string // RegistryAuth is the base64 encoded credentials for the registry
 	PrivilegeFunc RequestPrivilegeFunc
+	Platform      string
 }
 
 // RequestPrivilegeFunc is a function interface that
diff --git a/api/types/configs.go b/api/types/configs.go
index e4d2ce6..20c19f2 100644
--- a/api/types/configs.go
+++ b/api/types/configs.go
@@ -16,7 +16,6 @@
 	HostConfig       *container.HostConfig
 	NetworkingConfig *network.NetworkingConfig
 	AdjustCPUShares  bool
-	Platform         string
 }
 
 // ContainerRmConfig holds arguments for the container remove
diff --git a/api/types/container/container_wait.go b/api/types/container/container_wait.go
index 77ecdba..47fb175 100644
--- a/api/types/container/container_wait.go
+++ b/api/types/container/container_wait.go
@@ -7,10 +7,22 @@
 // See hack/generate-swagger-api.sh
 // ----------------------------------------------------------------------------
 
+// ContainerWaitOKBodyError container waiting error, if any
+// swagger:model ContainerWaitOKBodyError
+type ContainerWaitOKBodyError struct {
+
+	// Details of an error
+	Message string `json:"Message,omitempty"`
+}
+
 // ContainerWaitOKBody container wait o k body
 // swagger:model ContainerWaitOKBody
 type ContainerWaitOKBody struct {
 
+	// error
+	// Required: true
+	Error *ContainerWaitOKBodyError `json:"Error"`
+
 	// Exit code of the container
 	// Required: true
 	StatusCode int64 `json:"StatusCode"`
diff --git a/api/types/container/host_config.go b/api/types/container/host_config.go
index bb421b3..568cdcc 100644
--- a/api/types/container/host_config.go
+++ b/api/types/container/host_config.go
@@ -20,6 +20,27 @@
 	return strings.ToLower(string(i)) == "default" || string(i) == ""
 }
 
+// IsHyperV indicates the use of a Hyper-V partition for isolation
+func (i Isolation) IsHyperV() bool {
+	return strings.ToLower(string(i)) == "hyperv"
+}
+
+// IsProcess indicates the use of process isolation
+func (i Isolation) IsProcess() bool {
+	return strings.ToLower(string(i)) == "process"
+}
+
+const (
+	// IsolationEmpty is unspecified (same behavior as default)
+	IsolationEmpty = Isolation("")
+	// IsolationDefault is the default isolation mode on current daemon
+	IsolationDefault = Isolation("default")
+	// IsolationProcess is process isolation mode
+	IsolationProcess = Isolation("process")
+	// IsolationHyperV is HyperV isolation mode
+	IsolationHyperV = Isolation("hyperv")
+)
+
 // IpcMode represents the container ipc stack.
 type IpcMode string
 
diff --git a/api/types/container/hostconfig_windows.go b/api/types/container/hostconfig_windows.go
index 469923f..3374d73 100644
--- a/api/types/container/hostconfig_windows.go
+++ b/api/types/container/hostconfig_windows.go
@@ -1,9 +1,5 @@
 package container
 
-import (
-	"strings"
-)
-
 // IsBridge indicates whether container uses the bridge network stack
 // in windows it is given the name NAT
 func (n NetworkMode) IsBridge() bool {
@@ -21,16 +17,6 @@
 	return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer()
 }
 
-// IsHyperV indicates the use of a Hyper-V partition for isolation
-func (i Isolation) IsHyperV() bool {
-	return strings.ToLower(string(i)) == "hyperv"
-}
-
-// IsProcess indicates the use of process isolation
-func (i Isolation) IsProcess() bool {
-	return strings.ToLower(string(i)) == "process"
-}
-
 // IsValid indicates if an isolation technology is valid
 func (i Isolation) IsValid() bool {
 	return i.IsDefault() || i.IsHyperV() || i.IsProcess()
diff --git a/api/types/filters/example_test.go b/api/types/filters/example_test.go
new file mode 100644
index 0000000..6529be3
--- /dev/null
+++ b/api/types/filters/example_test.go
@@ -0,0 +1,24 @@
+package filters
+
+func ExampleArgs_MatchKVList() {
+	args := NewArgs(
+		Arg("label", "image=foo"),
+		Arg("label", "state=running"))
+
+	// returns true because there are no values for bogus
+	args.MatchKVList("bogus", nil)
+
+	// returns false because there are no sources
+	args.MatchKVList("label", nil)
+
+	// returns true because all sources are matched
+	args.MatchKVList("label", map[string]string{
+		"image": "foo",
+		"state": "running",
+	})
+
+	// returns false because the values do not match
+	args.MatchKVList("label", map[string]string{
+		"image": "other",
+	})
+}
diff --git a/api/types/filters/parse.go b/api/types/filters/parse.go
index 363d454..d45d052 100644
--- a/api/types/filters/parse.go
+++ b/api/types/filters/parse.go
@@ -1,5 +1,6 @@
-// Package filters provides helper function to parse and handle command line
-// filter, used for example in docker ps or docker images commands.
+/*Package filters provides tools for encoding a mapping of keys to a set of
+multiple values.
+*/
 package filters
 
 import (
@@ -11,27 +12,34 @@
 	"github.com/docker/docker/api/types/versions"
 )
 
-// Args stores filter arguments as map key:{map key: bool}.
-// It contains an aggregation of the map of arguments (which are in the form
-// of -f 'key=value') based on the key, and stores values for the same key
-// in a map with string keys and boolean values.
-// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
-// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
+// Args stores a mapping of keys to a set of multiple values.
 type Args struct {
 	fields map[string]map[string]bool
 }
 
-// NewArgs initializes a new Args struct.
-func NewArgs() Args {
-	return Args{fields: map[string]map[string]bool{}}
+// KeyValuePair are used to initialize a new Args
+type KeyValuePair struct {
+	Key   string
+	Value string
 }
 
-// ParseFlag parses the argument to the filter flag. Like
+// Arg creates a new KeyValuePair for initializing Args
+func Arg(key, value string) KeyValuePair {
+	return KeyValuePair{Key: key, Value: value}
+}
+
+// NewArgs returns a new Args populated with the initial args
+func NewArgs(initialArgs ...KeyValuePair) Args {
+	args := Args{fields: map[string]map[string]bool{}}
+	for _, arg := range initialArgs {
+		args.Add(arg.Key, arg.Value)
+	}
+	return args
+}
+
+// ParseFlag parses a key=value string and adds it to an Args.
 //
-//   `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
-//
-// If prev map is provided, then it is appended to, and returned. By default a new
-// map is created.
+// Deprecated: Use Args.Add()
 func ParseFlag(arg string, prev Args) (Args, error) {
 	filters := prev
 	if len(arg) == 0 {
@@ -52,74 +60,95 @@
 	return filters, nil
 }
 
-// ErrBadFormat is an error returned in case of bad format for a filter.
+// ErrBadFormat is an error returned when a filter is not in the form key=value
+//
+// Deprecated: this error will be removed in a future version
 var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
 
-// ToParam packs the Args into a string for easy transport from client to server.
+// ToParam encodes the Args as a JSON encoded string
+//
+// Deprecated: use ToJSON
 func ToParam(a Args) (string, error) {
-	// this way we don't URL encode {}, just empty space
+	return ToJSON(a)
+}
+
+// MarshalJSON returns a JSON byte representation of the Args
+func (args Args) MarshalJSON() ([]byte, error) {
+	if len(args.fields) == 0 {
+		return []byte{}, nil
+	}
+	return json.Marshal(args.fields)
+}
+
+// ToJSON returns the Args as a JSON encoded string
+func ToJSON(a Args) (string, error) {
 	if a.Len() == 0 {
 		return "", nil
 	}
-
-	buf, err := json.Marshal(a.fields)
-	if err != nil {
-		return "", err
-	}
-	return string(buf), nil
+	buf, err := json.Marshal(a)
+	return string(buf), err
 }
 
-// ToParamWithVersion packs the Args into a string for easy transport from client to server.
-// The generated string will depend on the specified version (corresponding to the API version).
+// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22
+// then the encoded format will use an older legacy format where the values are a
+// list of strings, instead of a set.
+//
+// Deprecated: Use ToJSON
 func ToParamWithVersion(version string, a Args) (string, error) {
-	// this way we don't URL encode {}, just empty space
 	if a.Len() == 0 {
 		return "", nil
 	}
 
-	// for daemons older than v1.10, filter must be of the form map[string][]string
-	var buf []byte
-	var err error
 	if version != "" && versions.LessThan(version, "1.22") {
-		buf, err = json.Marshal(convertArgsToSlice(a.fields))
-	} else {
-		buf, err = json.Marshal(a.fields)
+		buf, err := json.Marshal(convertArgsToSlice(a.fields))
+		return string(buf), err
 	}
-	if err != nil {
-		return "", err
-	}
-	return string(buf), nil
+
+	return ToJSON(a)
 }
 
-// FromParam unpacks the filter Args.
+// FromParam decodes a JSON encoded string into Args
+//
+// Deprecated: use FromJSON
 func FromParam(p string) (Args, error) {
-	if len(p) == 0 {
-		return NewArgs(), nil
-	}
-
-	r := strings.NewReader(p)
-	d := json.NewDecoder(r)
-
-	m := map[string]map[string]bool{}
-	if err := d.Decode(&m); err != nil {
-		r.Seek(0, 0)
-
-		// Allow parsing old arguments in slice format.
-		// Because other libraries might be sending them in this format.
-		deprecated := map[string][]string{}
-		if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
-			m = deprecatedArgs(deprecated)
-		} else {
-			return NewArgs(), err
-		}
-	}
-	return Args{m}, nil
+	return FromJSON(p)
 }
 
-// Get returns the list of values associates with a field.
-// It returns a slice of strings to keep backwards compatibility with old code.
-func (filters Args) Get(field string) []string {
-	values := filters.fields[field]
+// FromJSON decodes a JSON encoded string into Args
+func FromJSON(p string) (Args, error) {
+	args := NewArgs()
+
+	if p == "" {
+		return args, nil
+	}
+
+	raw := []byte(p)
+	err := json.Unmarshal(raw, &args)
+	if err == nil {
+		return args, nil
+	}
+
+	// Fallback to parsing arguments in the legacy slice format
+	deprecated := map[string][]string{}
+	if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil {
+		return args, err
+	}
+
+	args.fields = deprecatedArgs(deprecated)
+	return args, nil
+}
+
+// UnmarshalJSON populates the Args from JSON encoded bytes
+func (args Args) UnmarshalJSON(raw []byte) error {
+	if len(raw) == 0 {
+		return nil
+	}
+	return json.Unmarshal(raw, &args.fields)
+}
+
+// Get returns the list of values associated with the key
+func (args Args) Get(key string) []string {
+	values := args.fields[key]
 	if values == nil {
 		return make([]string, 0)
 	}
@@ -130,37 +159,34 @@
 	return slice
 }
 
-// Add adds a new value to a filter field.
-func (filters Args) Add(name, value string) {
-	if _, ok := filters.fields[name]; ok {
-		filters.fields[name][value] = true
+// Add a new value to the set of values
+func (args Args) Add(key, value string) {
+	if _, ok := args.fields[key]; ok {
+		args.fields[key][value] = true
 	} else {
-		filters.fields[name] = map[string]bool{value: true}
+		args.fields[key] = map[string]bool{value: true}
 	}
 }
 
-// Del removes a value from a filter field.
-func (filters Args) Del(name, value string) {
-	if _, ok := filters.fields[name]; ok {
-		delete(filters.fields[name], value)
-		if len(filters.fields[name]) == 0 {
-			delete(filters.fields, name)
+// Del removes a value from the set
+func (args Args) Del(key, value string) {
+	if _, ok := args.fields[key]; ok {
+		delete(args.fields[key], value)
+		if len(args.fields[key]) == 0 {
+			delete(args.fields, key)
 		}
 	}
 }
 
-// Len returns the number of fields in the arguments.
-func (filters Args) Len() int {
-	return len(filters.fields)
+// Len returns the number of keys in the mapping
+func (args Args) Len() int {
+	return len(args.fields)
 }
 
-// MatchKVList returns true if the values for the specified field matches the ones
-// from the sources.
-// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
-//      field is 'label' and sources are {'label1': '1', 'label2': '2'}
-//      it returns true.
-func (filters Args) MatchKVList(field string, sources map[string]string) bool {
-	fieldValues := filters.fields[field]
+// MatchKVList returns true if all the pairs in sources exist as key=value
+// pairs in the mapping at key, or if there are no values at key.
+func (args Args) MatchKVList(key string, sources map[string]string) bool {
+	fieldValues := args.fields[key]
 
 	//do not filter if there is no filter set or cannot determine filter
 	if len(fieldValues) == 0 {
@@ -171,8 +197,8 @@
 		return false
 	}
 
-	for name2match := range fieldValues {
-		testKV := strings.SplitN(name2match, "=", 2)
+	for value := range fieldValues {
+		testKV := strings.SplitN(value, "=", 2)
 
 		v, ok := sources[testKV[0]]
 		if !ok {
@@ -186,16 +212,13 @@
 	return true
 }
 
-// Match returns true if the values for the specified field matches the source string
-// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
-//      field is 'image.name' and source is 'ubuntu'
-//      it returns true.
-func (filters Args) Match(field, source string) bool {
-	if filters.ExactMatch(field, source) {
+// Match returns true if any of the values at key match the source string
+func (args Args) Match(field, source string) bool {
+	if args.ExactMatch(field, source) {
 		return true
 	}
 
-	fieldValues := filters.fields[field]
+	fieldValues := args.fields[field]
 	for name2match := range fieldValues {
 		match, err := regexp.MatchString(name2match, source)
 		if err != nil {
@@ -208,9 +231,9 @@
 	return false
 }
 
-// ExactMatch returns true if the source matches exactly one of the filters.
-func (filters Args) ExactMatch(field, source string) bool {
-	fieldValues, ok := filters.fields[field]
+// ExactMatch returns true if the source matches exactly one of the values.
+func (args Args) ExactMatch(key, source string) bool {
+	fieldValues, ok := args.fields[key]
 	//do not filter if there is no filter set or cannot determine filter
 	if !ok || len(fieldValues) == 0 {
 		return true
@@ -220,14 +243,15 @@
 	return fieldValues[source]
 }
 
-// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
-func (filters Args) UniqueExactMatch(field, source string) bool {
-	fieldValues := filters.fields[field]
+// UniqueExactMatch returns true if there is only one value and the source
+// matches exactly the value.
+func (args Args) UniqueExactMatch(key, source string) bool {
+	fieldValues := args.fields[key]
 	//do not filter if there is no filter set or cannot determine filter
 	if len(fieldValues) == 0 {
 		return true
 	}
-	if len(filters.fields[field]) != 1 {
+	if len(args.fields[key]) != 1 {
 		return false
 	}
 
@@ -235,14 +259,14 @@
 	return fieldValues[source]
 }
 
-// FuzzyMatch returns true if the source matches exactly one of the filters,
-// or the source has one of the filters as a prefix.
-func (filters Args) FuzzyMatch(field, source string) bool {
-	if filters.ExactMatch(field, source) {
+// FuzzyMatch returns true if the source matches exactly one value, or the
+// source has one of the values as a prefix.
+func (args Args) FuzzyMatch(key, source string) bool {
+	if args.ExactMatch(key, source) {
 		return true
 	}
 
-	fieldValues := filters.fields[field]
+	fieldValues := args.fields[key]
 	for prefix := range fieldValues {
 		if strings.HasPrefix(source, prefix) {
 			return true
@@ -251,9 +275,17 @@
 	return false
 }
 
-// Include returns true if the name of the field to filter is in the filters.
-func (filters Args) Include(field string) bool {
-	_, ok := filters.fields[field]
+// Include returns true if the key exists in the mapping
+//
+// Deprecated: use Contains
+func (args Args) Include(field string) bool {
+	_, ok := args.fields[field]
+	return ok
+}
+
+// Contains returns true if the key exists in the mapping
+func (args Args) Contains(field string) bool {
+	_, ok := args.fields[field]
 	return ok
 }
 
@@ -265,10 +297,10 @@
 
 func (invalidFilter) InvalidParameter() {}
 
-// Validate ensures that all the fields in the filter are valid.
-// It returns an error as soon as it finds an invalid field.
-func (filters Args) Validate(accepted map[string]bool) error {
-	for name := range filters.fields {
+// Validate compares the set of accepted keys against the keys in the mapping.
+// An error is returned if any mapping keys are not in the accepted set.
+func (args Args) Validate(accepted map[string]bool) error {
+	for name := range args.fields {
 		if !accepted[name] {
 			return invalidFilter(name)
 		}
@@ -276,13 +308,14 @@
 	return nil
 }
 
-// WalkValues iterates over the list of filtered values for a field.
-// It stops the iteration if it finds an error and it returns that error.
-func (filters Args) WalkValues(field string, op func(value string) error) error {
-	if _, ok := filters.fields[field]; !ok {
+// WalkValues iterates over the list of values for a key in the mapping and calls
+// op() for each value. If op returns an error the iteration stops and the
+// error is returned.
+func (args Args) WalkValues(field string, op func(value string) error) error {
+	if _, ok := args.fields[field]; !ok {
 		return nil
 	}
-	for v := range filters.fields[field] {
+	for v := range args.fields[field] {
 		if err := op(v); err != nil {
 			return err
 		}
diff --git a/api/types/filters/parse_test.go b/api/types/filters/parse_test.go
index ccd1684..67b2ec9 100644
--- a/api/types/filters/parse_test.go
+++ b/api/types/filters/parse_test.go
@@ -3,6 +3,9 @@
 import (
 	"errors"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestParseArgs(t *testing.T) {
@@ -16,23 +19,18 @@
 		args = NewArgs()
 		err  error
 	)
+
 	for i := range flagArgs {
 		args, err = ParseFlag(flagArgs[i], args)
-		if err != nil {
-			t.Errorf("failed to parse %s: %s", flagArgs[i], err)
-		}
+		require.NoError(t, err)
 	}
-	if len(args.Get("created")) != 1 {
-		t.Error("failed to set this arg")
-	}
-	if len(args.Get("image.name")) != 2 {
-		t.Error("the args should have collapsed")
-	}
+	assert.Len(t, args.Get("created"), 1)
+	assert.Len(t, args.Get("image.name"), 2)
 }
 
 func TestParseArgsEdgeCase(t *testing.T) {
-	var filters Args
-	args, err := ParseFlag("", filters)
+	var args Args
+	args, err := ParseFlag("", args)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -44,14 +42,14 @@
 	}
 }
 
-func TestToParam(t *testing.T) {
+func TestToJSON(t *testing.T) {
 	fields := map[string]map[string]bool{
 		"created":    {"today": true},
 		"image.name": {"ubuntu*": true, "*untu": true},
 	}
 	a := Args{fields: fields}
 
-	_, err := ToParam(a)
+	_, err := ToJSON(a)
 	if err != nil {
 		t.Errorf("failed to marshal the filters: %s", err)
 	}
@@ -82,7 +80,7 @@
 	}
 }
 
-func TestFromParam(t *testing.T) {
+func TestFromJSON(t *testing.T) {
 	invalids := []string{
 		"anything",
 		"['a','list']",
@@ -105,14 +103,14 @@
 	}
 
 	for _, invalid := range invalids {
-		if _, err := FromParam(invalid); err == nil {
+		if _, err := FromJSON(invalid); err == nil {
 			t.Fatalf("Expected an error with %v, got nothing", invalid)
 		}
 	}
 
 	for expectedArgs, matchers := range valid {
 		for _, json := range matchers {
-			args, err := FromParam(json)
+			args, err := FromJSON(json)
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -138,11 +136,11 @@
 
 func TestEmpty(t *testing.T) {
 	a := Args{}
-	v, err := ToParam(a)
+	v, err := ToJSON(a)
 	if err != nil {
 		t.Errorf("failed to marshal the filters: %s", err)
 	}
-	v1, err := FromParam(v)
+	v1, err := FromJSON(v)
 	if err != nil {
 		t.Errorf("%s", err)
 	}
@@ -184,7 +182,7 @@
 	}
 
 	for args, field := range matches {
-		if args.MatchKVList(field, sources) != true {
+		if !args.MatchKVList(field, sources) {
 			t.Fatalf("Expected true for %v on %v, got false", sources, args)
 		}
 	}
@@ -204,7 +202,7 @@
 	}
 
 	for args, field := range differs {
-		if args.MatchKVList(field, sources) != false {
+		if args.MatchKVList(field, sources) {
 			t.Fatalf("Expected false for %v on %v, got true", sources, args)
 		}
 	}
@@ -233,9 +231,8 @@
 	}
 
 	for args, field := range matches {
-		if args.Match(field, source) != true {
-			t.Fatalf("Expected true for %v on %v, got false", source, args)
-		}
+		assert.True(t, args.Match(field, source),
+			"Expected field %s to match %s", field, source)
 	}
 
 	differs := map[*Args]string{
@@ -258,9 +255,8 @@
 	}
 
 	for args, field := range differs {
-		if args.Match(field, source) != false {
-			t.Fatalf("Expected false for %v on %v, got true", source, args)
-		}
+		assert.False(t, args.Match(field, source),
+			"Expected field %s to not match %s", field, source)
 	}
 }
 
@@ -341,6 +337,17 @@
 	}
 }
 
+func TestContains(t *testing.T) {
+	f := NewArgs()
+	if f.Contains("status") {
+		t.Fatal("Expected to not contain a status key, got true")
+	}
+	f.Add("status", "running")
+	if !f.Contains("status") {
+		t.Fatal("Expected to contain a status key, got false")
+	}
+}
+
 func TestInclude(t *testing.T) {
 	f := NewArgs()
 	if f.Include("status") {
diff --git a/api/types/swarm/container.go b/api/types/swarm/container.go
index 6f8b45f..734236c 100644
--- a/api/types/swarm/container.go
+++ b/api/types/swarm/container.go
@@ -65,8 +65,9 @@
 	// The format of extra hosts on swarmkit is specified in:
 	// http://man7.org/linux/man-pages/man5/hosts.5.html
 	//    IP_address canonical_hostname [aliases...]
-	Hosts     []string           `json:",omitempty"`
-	DNSConfig *DNSConfig         `json:",omitempty"`
-	Secrets   []*SecretReference `json:",omitempty"`
-	Configs   []*ConfigReference `json:",omitempty"`
+	Hosts     []string            `json:",omitempty"`
+	DNSConfig *DNSConfig          `json:",omitempty"`
+	Secrets   []*SecretReference  `json:",omitempty"`
+	Configs   []*ConfigReference  `json:",omitempty"`
+	Isolation container.Isolation `json:",omitempty"`
 }
diff --git a/api/types/time/timestamp.go b/api/types/time/timestamp.go
index 9aa9702..ed9c116 100644
--- a/api/types/time/timestamp.go
+++ b/api/types/time/timestamp.go
@@ -29,10 +29,8 @@
 	}
 
 	var format string
-	var parseInLocation bool
-
 	// if the string has a Z or a + or three dashes use parse otherwise use parseinlocation
-	parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
+	parseInLocation := !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3)
 
 	if strings.Contains(value, ".") {
 		if parseInLocation {
diff --git a/builder/builder.go b/builder/builder.go
index e480601..5a3e2cd 100644
--- a/builder/builder.go
+++ b/builder/builder.go
@@ -12,6 +12,7 @@
 	"github.com/docker/docker/api/types/container"
 	containerpkg "github.com/docker/docker/container"
 	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/containerfs"
 	"golang.org/x/net/context"
 )
 
@@ -24,7 +25,7 @@
 // instructions in the builder.
 type Source interface {
 	// Root returns root path for accessing source
-	Root() string
+	Root() containerfs.ContainerFS
 	// Close allows to signal that the filesystem tree won't be used anymore.
 	// For Context implementations using a temporary directory, it is recommended to
 	// delete the temporary directory in Close().
@@ -94,12 +95,13 @@
 	ImageID() string
 	RunConfig() *container.Config
 	MarshalJSON() ([]byte, error)
+	OperatingSystem() string
 }
 
 // ReleaseableLayer is an image layer that can be mounted and released
 type ReleaseableLayer interface {
 	Release() error
-	Mount() (string, error)
+	Mount() (containerfs.ContainerFS, error)
 	Commit(platform string) (ReleaseableLayer, error)
 	DiffID() layer.DiffID
 }
diff --git a/builder/dockerfile/bflag.go b/builder/dockerfile/bflag.go
deleted file mode 100644
index d849661..0000000
--- a/builder/dockerfile/bflag.go
+++ /dev/null
@@ -1,183 +0,0 @@
-package dockerfile
-
-import (
-	"fmt"
-	"strings"
-)
-
-// FlagType is the type of the build flag
-type FlagType int
-
-const (
-	boolType FlagType = iota
-	stringType
-)
-
-// BFlags contains all flags information for the builder
-type BFlags struct {
-	Args  []string // actual flags/args from cmd line
-	flags map[string]*Flag
-	used  map[string]*Flag
-	Err   error
-}
-
-// Flag contains all information for a flag
-type Flag struct {
-	bf       *BFlags
-	name     string
-	flagType FlagType
-	Value    string
-}
-
-// NewBFlags returns the new BFlags struct
-func NewBFlags() *BFlags {
-	return &BFlags{
-		flags: make(map[string]*Flag),
-		used:  make(map[string]*Flag),
-	}
-}
-
-// NewBFlagsWithArgs returns the new BFlags struct with Args set to args
-func NewBFlagsWithArgs(args []string) *BFlags {
-	flags := NewBFlags()
-	flags.Args = args
-	return flags
-}
-
-// AddBool adds a bool flag to BFlags
-// Note, any error will be generated when Parse() is called (see Parse).
-func (bf *BFlags) AddBool(name string, def bool) *Flag {
-	flag := bf.addFlag(name, boolType)
-	if flag == nil {
-		return nil
-	}
-	if def {
-		flag.Value = "true"
-	} else {
-		flag.Value = "false"
-	}
-	return flag
-}
-
-// AddString adds a string flag to BFlags
-// Note, any error will be generated when Parse() is called (see Parse).
-func (bf *BFlags) AddString(name string, def string) *Flag {
-	flag := bf.addFlag(name, stringType)
-	if flag == nil {
-		return nil
-	}
-	flag.Value = def
-	return flag
-}
-
-// addFlag is a generic func used by the other AddXXX() func
-// to add a new flag to the BFlags struct.
-// Note, any error will be generated when Parse() is called (see Parse).
-func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag {
-	if _, ok := bf.flags[name]; ok {
-		bf.Err = fmt.Errorf("Duplicate flag defined: %s", name)
-		return nil
-	}
-
-	newFlag := &Flag{
-		bf:       bf,
-		name:     name,
-		flagType: flagType,
-	}
-	bf.flags[name] = newFlag
-
-	return newFlag
-}
-
-// IsUsed checks if the flag is used
-func (fl *Flag) IsUsed() bool {
-	if _, ok := fl.bf.used[fl.name]; ok {
-		return true
-	}
-	return false
-}
-
-// IsTrue checks if a bool flag is true
-func (fl *Flag) IsTrue() bool {
-	if fl.flagType != boolType {
-		// Should never get here
-		panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name))
-	}
-	return fl.Value == "true"
-}
-
-// Parse parses and checks if the BFlags is valid.
-// Any error noticed during the AddXXX() funcs will be generated/returned
-// here.  We do this because an error during AddXXX() is more like a
-// compile time error so it doesn't matter too much when we stop our
-// processing as long as we do stop it, so this allows the code
-// around AddXXX() to be just:
-//     defFlag := AddString("description", "")
-// w/o needing to add an if-statement around each one.
-func (bf *BFlags) Parse() error {
-	// If there was an error while defining the possible flags
-	// go ahead and bubble it back up here since we didn't do it
-	// earlier in the processing
-	if bf.Err != nil {
-		return fmt.Errorf("Error setting up flags: %s", bf.Err)
-	}
-
-	for _, arg := range bf.Args {
-		if !strings.HasPrefix(arg, "--") {
-			return fmt.Errorf("Arg should start with -- : %s", arg)
-		}
-
-		if arg == "--" {
-			return nil
-		}
-
-		arg = arg[2:]
-		value := ""
-
-		index := strings.Index(arg, "=")
-		if index >= 0 {
-			value = arg[index+1:]
-			arg = arg[:index]
-		}
-
-		flag, ok := bf.flags[arg]
-		if !ok {
-			return fmt.Errorf("Unknown flag: %s", arg)
-		}
-
-		if _, ok = bf.used[arg]; ok {
-			return fmt.Errorf("Duplicate flag specified: %s", arg)
-		}
-
-		bf.used[arg] = flag
-
-		switch flag.flagType {
-		case boolType:
-			// value == "" is only ok if no "=" was specified
-			if index >= 0 && value == "" {
-				return fmt.Errorf("Missing a value on flag: %s", arg)
-			}
-
-			lower := strings.ToLower(value)
-			if lower == "" {
-				flag.Value = "true"
-			} else if lower == "true" || lower == "false" {
-				flag.Value = lower
-			} else {
-				return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value)
-			}
-
-		case stringType:
-			if index < 0 {
-				return fmt.Errorf("Missing a value on flag: %s", arg)
-			}
-			flag.Value = value
-
-		default:
-			panic("No idea what kind of flag we have! Should never get here!")
-		}
-
-	}
-
-	return nil
-}
diff --git a/builder/dockerfile/bflag_test.go b/builder/dockerfile/bflag_test.go
deleted file mode 100644
index ac07e48..0000000
--- a/builder/dockerfile/bflag_test.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package dockerfile
-
-import (
-	"testing"
-)
-
-func TestBuilderFlags(t *testing.T) {
-	var expected string
-	var err error
-
-	// ---
-
-	bf := NewBFlags()
-	bf.Args = []string{}
-	if err := bf.Parse(); err != nil {
-		t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	bf.Args = []string{"--"}
-	if err := bf.Parse(); err != nil {
-		t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flStr1 := bf.AddString("str1", "")
-	flBool1 := bf.AddBool("bool1", false)
-	bf.Args = []string{}
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err)
-	}
-
-	if flStr1.IsUsed() == true {
-		t.Fatal("Test3 - str1 was not used!")
-	}
-	if flBool1.IsUsed() == true {
-		t.Fatal("Test3 - bool1 was not used!")
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flStr1 = bf.AddString("str1", "HI")
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err)
-	}
-
-	if flStr1.Value != "HI" {
-		t.Fatal("Str1 was supposed to default to: HI")
-	}
-	if flBool1.IsTrue() {
-		t.Fatal("Bool1 was supposed to default to: false")
-	}
-	if flStr1.IsUsed() == true {
-		t.Fatal("Str1 was not used!")
-	}
-	if flBool1.IsUsed() == true {
-		t.Fatal("Bool1 was not used!")
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flStr1 = bf.AddString("str1", "HI")
-	bf.Args = []string{"--str1"}
-
-	if err = bf.Parse(); err == nil {
-		t.Fatalf("Test %q was supposed to fail", bf.Args)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flStr1 = bf.AddString("str1", "HI")
-	bf.Args = []string{"--str1="}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
-	}
-
-	expected = ""
-	if flStr1.Value != expected {
-		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flStr1 = bf.AddString("str1", "HI")
-	bf.Args = []string{"--str1=BYE"}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
-	}
-
-	expected = "BYE"
-	if flStr1.Value != expected {
-		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{"--bool1"}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
-	}
-
-	if !flBool1.IsTrue() {
-		t.Fatal("Test-b1 Bool1 was supposed to be true")
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{"--bool1=true"}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
-	}
-
-	if !flBool1.IsTrue() {
-		t.Fatal("Test-b2 Bool1 was supposed to be true")
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{"--bool1=false"}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
-	}
-
-	if flBool1.IsTrue() {
-		t.Fatal("Test-b3 Bool1 was supposed to be false")
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{"--bool1=false1"}
-
-	if err = bf.Parse(); err == nil {
-		t.Fatalf("Test %q was supposed to fail", bf.Args)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{"--bool2"}
-
-	if err = bf.Parse(); err == nil {
-		t.Fatalf("Test %q was supposed to fail", bf.Args)
-	}
-
-	// ---
-
-	bf = NewBFlags()
-	flStr1 = bf.AddString("str1", "HI")
-	flBool1 = bf.AddBool("bool1", false)
-	bf.Args = []string{"--bool1", "--str1=BYE"}
-
-	if err = bf.Parse(); err != nil {
-		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
-	}
-
-	if flStr1.Value != "BYE" {
-		t.Fatalf("Test %s, str1 should be BYE", bf.Args)
-	}
-	if !flBool1.IsTrue() {
-		t.Fatalf("Test %s, bool1 should be true", bf.Args)
-	}
-}
diff --git a/builder/dockerfile/buildargs.go b/builder/dockerfile/buildargs.go
index e0daf9a..c8f34a7 100644
--- a/builder/dockerfile/buildargs.go
+++ b/builder/dockerfile/buildargs.go
@@ -42,6 +42,26 @@
 	}
 }
 
+func (b *buildArgs) Clone() *buildArgs {
+	result := newBuildArgs(b.argsFromOptions)
+	for k, v := range b.allowedBuildArgs {
+		result.allowedBuildArgs[k] = v
+	}
+	for k, v := range b.allowedMetaArgs {
+		result.allowedMetaArgs[k] = v
+	}
+	for k := range b.referencedArgs {
+		result.referencedArgs[k] = struct{}{}
+	}
+	return result
+}
+
+func (b *buildArgs) MergeReferencedArgs(other *buildArgs) {
+	for k := range other.referencedArgs {
+		b.referencedArgs[k] = struct{}{}
+	}
+}
+
 // WarnOnUnusedBuildArgs checks if there are any leftover build-args that were
 // passed but not consumed during build. Print a warning, if there are any.
 func (b *buildArgs) WarnOnUnusedBuildArgs(out io.Writer) {
diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go
index 34db378..b62d6fc 100644
--- a/builder/dockerfile/builder.go
+++ b/builder/dockerfile/builder.go
@@ -13,12 +13,10 @@
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/builder"
-	"github.com/docker/docker/builder/dockerfile/command"
+	"github.com/docker/docker/builder/dockerfile/instructions"
 	"github.com/docker/docker/builder/dockerfile/parser"
 	"github.com/docker/docker/builder/fscache"
 	"github.com/docker/docker/builder/remotecontext"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/streamformatter"
 	"github.com/docker/docker/pkg/stringid"
@@ -43,6 +41,10 @@
 	"workdir":     true,
 }
 
+const (
+	stepFormat = "Step %d/%d : %v"
+)
+
 // SessionGetter is object used to get access to a session by uuid
 type SessionGetter interface {
 	Get(ctx context.Context, uuid string) (session.Caller, error)
@@ -50,21 +52,21 @@
 
 // BuildManager is shared across all Builder objects
 type BuildManager struct {
-	archiver  *archive.Archiver
-	backend   builder.Backend
-	pathCache pathCache // TODO: make this persistent
-	sg        SessionGetter
-	fsCache   *fscache.FSCache
+	idMappings *idtools.IDMappings
+	backend    builder.Backend
+	pathCache  pathCache // TODO: make this persistent
+	sg         SessionGetter
+	fsCache    *fscache.FSCache
 }
 
 // NewBuildManager creates a BuildManager
 func NewBuildManager(b builder.Backend, sg SessionGetter, fsCache *fscache.FSCache, idMappings *idtools.IDMappings) (*BuildManager, error) {
 	bm := &BuildManager{
-		backend:   b,
-		pathCache: &syncmap.Map{},
-		sg:        sg,
-		archiver:  chrootarchive.NewArchiver(idMappings),
-		fsCache:   fsCache,
+		backend:    b,
+		pathCache:  &syncmap.Map{},
+		sg:         sg,
+		idMappings: idMappings,
+		fsCache:    fsCache,
 	}
 	if err := fsCache.RegisterTransport(remotecontext.ClientSessionRemote, NewClientSessionTransport()); err != nil {
 		return nil, err
@@ -91,15 +93,6 @@
 		}
 	}()
 
-	// TODO @jhowardmsft LCOW support - this will require rework to allow both linux and Windows simultaneously.
-	// This is an interim solution to hardcode to linux if LCOW is turned on.
-	if dockerfile.Platform == "" {
-		dockerfile.Platform = runtime.GOOS
-		if dockerfile.Platform == "windows" && system.LCOWSupported() {
-			dockerfile.Platform = "linux"
-		}
-	}
-
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
@@ -109,16 +102,27 @@
 		source = src
 	}
 
+	os := runtime.GOOS
+	optionsPlatform := system.ParsePlatform(config.Options.Platform)
+	if dockerfile.OS != "" {
+		if optionsPlatform.OS != "" && optionsPlatform.OS != dockerfile.OS {
+			return nil, fmt.Errorf("invalid platform")
+		}
+		os = dockerfile.OS
+	} else if optionsPlatform.OS != "" {
+		os = optionsPlatform.OS
+	}
+	config.Options.Platform = os
+	dockerfile.OS = os
+
 	builderOptions := builderOptions{
 		Options:        config.Options,
 		ProgressWriter: config.ProgressWriter,
 		Backend:        bm.backend,
 		PathCache:      bm.pathCache,
-		Archiver:       bm.archiver,
-		Platform:       dockerfile.Platform,
+		IDMappings:     bm.idMappings,
 	}
-
-	return newBuilder(ctx, builderOptions).build(source, dockerfile)
+	return newBuilder(ctx, builderOptions, os).build(source, dockerfile)
 }
 
 func (bm *BuildManager) initializeClientSession(ctx context.Context, cancel func(), options *types.ImageBuildOptions) (builder.Source, error) {
@@ -127,10 +131,10 @@
 	}
 	logrus.Debug("client is session enabled")
 
-	ctx, cancelCtx := context.WithTimeout(ctx, sessionConnectTimeout)
+	connectCtx, cancelCtx := context.WithTimeout(ctx, sessionConnectTimeout)
 	defer cancelCtx()
 
-	c, err := bm.sg.Get(ctx, options.SessionID)
+	c, err := bm.sg.Get(connectCtx, options.SessionID)
 	if err != nil {
 		return nil, err
 	}
@@ -160,8 +164,7 @@
 	Backend        builder.Backend
 	ProgressWriter backend.ProgressWriter
 	PathCache      pathCache
-	Archiver       *archive.Archiver
-	Platform       string
+	IDMappings     *idtools.IDMappings
 }
 
 // Builder is a Dockerfile builder
@@ -177,40 +180,21 @@
 	docker    builder.Backend
 	clientCtx context.Context
 
-	archiver         *archive.Archiver
-	buildStages      *buildStages
+	idMappings       *idtools.IDMappings
 	disableCommit    bool
-	buildArgs        *buildArgs
 	imageSources     *imageSources
 	pathCache        pathCache
 	containerManager *containerManager
 	imageProber      ImageProber
-
-	// TODO @jhowardmft LCOW Support. This will be moved to options at a later
-	// stage, however that cannot be done now as it affects the public API
-	// if it were.
-	platform string
 }
 
 // newBuilder creates a new Dockerfile builder from an optional dockerfile and a Options.
-// TODO @jhowardmsft LCOW support: Eventually platform can be moved into the builder
-// options, however, that would be an API change as it shares types.ImageBuildOptions.
-func newBuilder(clientCtx context.Context, options builderOptions) *Builder {
+func newBuilder(clientCtx context.Context, options builderOptions, os string) *Builder {
 	config := options.Options
 	if config == nil {
 		config = new(types.ImageBuildOptions)
 	}
 
-	// @jhowardmsft LCOW Support. For the time being, this is interim. Eventually
-	// will be moved to types.ImageBuildOptions, but it can't for now as that would
-	// be an API change.
-	if options.Platform == "" {
-		options.Platform = runtime.GOOS
-	}
-	if options.Platform == "windows" && system.LCOWSupported() {
-		options.Platform = "linux"
-	}
-
 	b := &Builder{
 		clientCtx:        clientCtx,
 		options:          config,
@@ -219,14 +203,11 @@
 		Aux:              options.ProgressWriter.AuxFormatter,
 		Output:           options.ProgressWriter.Output,
 		docker:           options.Backend,
-		archiver:         options.Archiver,
-		buildArgs:        newBuildArgs(config.BuildArgs),
-		buildStages:      newBuildStages(),
+		idMappings:       options.IDMappings,
 		imageSources:     newImageSources(clientCtx, options),
 		pathCache:        options.PathCache,
-		imageProber:      newImageProber(options.Backend, config.CacheFrom, options.Platform, config.NoCache),
+		imageProber:      newImageProber(options.Backend, config.CacheFrom, os, config.NoCache),
 		containerManager: newContainerManager(options.Backend),
-		platform:         options.Platform,
 	}
 
 	return b
@@ -239,24 +220,27 @@
 
 	addNodesForLabelOption(dockerfile.AST, b.options.Labels)
 
-	if err := checkDispatchDockerfile(dockerfile.AST); err != nil {
-		buildsFailed.WithValues(metricsDockerfileSyntaxError).Inc()
+	stages, metaArgs, err := instructions.Parse(dockerfile.AST)
+	if err != nil {
+		if instructions.IsUnknownInstruction(err) {
+			buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
+		}
 		return nil, validationError{err}
 	}
-
-	dispatchState, err := b.dispatchDockerfileWithCancellation(dockerfile, source)
-	if err != nil {
-		return nil, err
-	}
-
-	if b.options.Target != "" && !dispatchState.isCurrentStage(b.options.Target) {
-		buildsFailed.WithValues(metricsBuildTargetNotReachableError).Inc()
-		return nil, errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target)
+	if b.options.Target != "" {
+		targetIx, found := instructions.HasStage(stages, b.options.Target)
+		if !found {
+			buildsFailed.WithValues(metricsBuildTargetNotReachableError).Inc()
+			return nil, errors.Errorf("failed to reach build target %s in Dockerfile", b.options.Target)
+		}
+		stages = stages[:targetIx+1]
 	}
 
 	dockerfile.PrintWarnings(b.Stderr)
-	b.buildArgs.WarnOnUnusedBuildArgs(b.Stderr)
-
+	dispatchState, err := b.dispatchDockerfileWithCancellation(stages, metaArgs, dockerfile.EscapeToken, source)
+	if err != nil {
+		return nil, err
+	}
 	if dispatchState.imageID == "" {
 		buildsFailed.WithValues(metricsDockerfileEmptyError).Inc()
 		return nil, errors.New("No image was generated. Is your Dockerfile empty?")
@@ -271,61 +255,87 @@
 	return aux.Emit(types.BuildResult{ID: state.imageID})
 }
 
-func (b *Builder) dispatchDockerfileWithCancellation(dockerfile *parser.Result, source builder.Source) (*dispatchState, error) {
-	shlex := NewShellLex(dockerfile.EscapeToken)
-	state := newDispatchState()
-	total := len(dockerfile.AST.Children)
-	var err error
-	for i, n := range dockerfile.AST.Children {
-		select {
-		case <-b.clientCtx.Done():
-			logrus.Debug("Builder: build cancelled!")
-			fmt.Fprint(b.Stdout, "Build cancelled")
-			buildsFailed.WithValues(metricsBuildCanceled).Inc()
-			return nil, errors.New("Build cancelled")
-		default:
-			// Not cancelled yet, keep going...
-		}
+func processMetaArg(meta instructions.ArgCommand, shlex *ShellLex, args *buildArgs) error {
+	// ShellLex currently only supports the concatenated string format
+	envs := convertMapToEnvList(args.GetAllAllowed())
+	if err := meta.Expand(func(word string) (string, error) {
+		return shlex.ProcessWord(word, envs)
+	}); err != nil {
+		return err
+	}
+	args.AddArg(meta.Key, meta.Value)
+	args.AddMetaArg(meta.Key, meta.Value)
+	return nil
+}
 
-		// If this is a FROM and we have a previous image then
-		// emit an aux message for that image since it is the
-		// end of the previous stage
-		if n.Value == command.From {
-			if err := emitImageID(b.Aux, state); err != nil {
-				return nil, err
-			}
-		}
+func printCommand(out io.Writer, currentCommandIndex int, totalCommands int, cmd interface{}) int {
+	fmt.Fprintf(out, stepFormat, currentCommandIndex, totalCommands, cmd)
+	fmt.Fprintln(out)
+	return currentCommandIndex + 1
+}
 
-		if n.Value == command.From && state.isCurrentStage(b.options.Target) {
-			break
-		}
+func (b *Builder) dispatchDockerfileWithCancellation(parseResult []instructions.Stage, metaArgs []instructions.ArgCommand, escapeToken rune, source builder.Source) (*dispatchState, error) {
+	dispatchRequest := dispatchRequest{}
+	buildArgs := newBuildArgs(b.options.BuildArgs)
+	totalCommands := len(metaArgs) + len(parseResult)
+	currentCommandIndex := 1
+	for _, stage := range parseResult {
+		totalCommands += len(stage.Commands)
+	}
+	shlex := NewShellLex(escapeToken)
+	for _, meta := range metaArgs {
+		currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, &meta)
 
-		opts := dispatchOptions{
-			state:   state,
-			stepMsg: formatStep(i, total),
-			node:    n,
-			shlex:   shlex,
-			source:  source,
-		}
-		if state, err = b.dispatch(opts); err != nil {
-			if b.options.ForceRemove {
-				b.containerManager.RemoveAll(b.Stdout)
-			}
+		err := processMetaArg(meta, shlex, buildArgs)
+		if err != nil {
 			return nil, err
 		}
+	}
 
-		fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(state.imageID))
-		if b.options.Remove {
-			b.containerManager.RemoveAll(b.Stdout)
+	stagesResults := newStagesBuildResults()
+
+	for _, stage := range parseResult {
+		if err := stagesResults.checkStageNameAvailable(stage.Name); err != nil {
+			return nil, err
+		}
+		dispatchRequest = newDispatchRequest(b, escapeToken, source, buildArgs, stagesResults)
+
+		currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, stage.SourceCode)
+		if err := initializeStage(dispatchRequest, &stage); err != nil {
+			return nil, err
+		}
+		dispatchRequest.state.updateRunConfig()
+		fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID))
+		for _, cmd := range stage.Commands {
+			select {
+			case <-b.clientCtx.Done():
+				logrus.Debug("Builder: build cancelled!")
+				fmt.Fprint(b.Stdout, "Build cancelled\n")
+				buildsFailed.WithValues(metricsBuildCanceled).Inc()
+				return nil, errors.New("Build cancelled")
+			default:
+				// Not cancelled yet, keep going...
+			}
+
+			currentCommandIndex = printCommand(b.Stdout, currentCommandIndex, totalCommands, cmd)
+
+			if err := dispatch(dispatchRequest, cmd); err != nil {
+				return nil, err
+			}
+			dispatchRequest.state.updateRunConfig()
+			fmt.Fprintf(b.Stdout, " ---> %s\n", stringid.TruncateID(dispatchRequest.state.imageID))
+
+		}
+		if err := emitImageID(b.Aux, dispatchRequest.state); err != nil {
+			return nil, err
+		}
+		buildArgs.MergeReferencedArgs(dispatchRequest.state.buildArgs)
+		if err := commitStage(dispatchRequest.state, stagesResults); err != nil {
+			return nil, err
 		}
 	}
-
-	// Emit a final aux message for the final image
-	if err := emitImageID(b.Aux, state); err != nil {
-		return nil, err
-	}
-
-	return state, nil
+	buildArgs.WarnOnUnusedBuildArgs(b.Stdout)
+	return dispatchRequest.state, nil
 }
 
 func addNodesForLabelOption(dockerfile *parser.Node, labels map[string]string) {
@@ -351,25 +361,19 @@
 		return config, nil
 	}
 
-	b := newBuilder(context.Background(), builderOptions{
-		Options: &types.ImageBuildOptions{NoCache: true},
-	})
-
 	dockerfile, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")))
 	if err != nil {
 		return nil, validationError{err}
 	}
 
-	// TODO @jhowardmsft LCOW support. For now, if LCOW enabled, switch to linux.
-	// Also explicitly set the platform. Ultimately this will be in the builder
-	// options, but we can't do that yet as it would change the API.
-	if dockerfile.Platform == "" {
-		dockerfile.Platform = runtime.GOOS
+	os := runtime.GOOS
+	if dockerfile.OS != "" {
+		os = dockerfile.OS
 	}
-	if dockerfile.Platform == "windows" && system.LCOWSupported() {
-		dockerfile.Platform = "linux"
-	}
-	b.platform = dockerfile.Platform
+
+	b := newBuilder(context.Background(), builderOptions{
+		Options: &types.ImageBuildOptions{NoCache: true},
+	}, os)
 
 	// ensure that the commands are valid
 	for _, n := range dockerfile.AST.Children {
@@ -382,39 +386,33 @@
 	b.Stderr = ioutil.Discard
 	b.disableCommit = true
 
-	if err := checkDispatchDockerfile(dockerfile.AST); err != nil {
-		return nil, validationError{err}
+	commands := []instructions.Command{}
+	for _, n := range dockerfile.AST.Children {
+		cmd, err := instructions.ParseCommand(n)
+		if err != nil {
+			return nil, validationError{err}
+		}
+		commands = append(commands, cmd)
 	}
-	dispatchState := newDispatchState()
-	dispatchState.runConfig = config
-	return dispatchFromDockerfile(b, dockerfile, dispatchState, nil)
+
+	dispatchRequest := newDispatchRequest(b, dockerfile.EscapeToken, nil, newBuildArgs(b.options.BuildArgs), newStagesBuildResults())
+	dispatchRequest.state.runConfig = config
+	dispatchRequest.state.imageID = config.Image
+	for _, cmd := range commands {
+		err := dispatch(dispatchRequest, cmd)
+		if err != nil {
+			return nil, validationError{err}
+		}
+		dispatchRequest.state.updateRunConfig()
+	}
+
+	return dispatchRequest.state.runConfig, nil
 }
 
-func checkDispatchDockerfile(dockerfile *parser.Node) error {
-	for _, n := range dockerfile.Children {
-		if err := checkDispatch(n); err != nil {
-			return errors.Wrapf(err, "Dockerfile parse error line %d", n.StartLine)
-		}
+func convertMapToEnvList(m map[string]string) []string {
+	result := []string{}
+	for k, v := range m {
+		result = append(result, k+"="+v)
 	}
-	return nil
-}
-
-func dispatchFromDockerfile(b *Builder, result *parser.Result, dispatchState *dispatchState, source builder.Source) (*container.Config, error) {
-	shlex := NewShellLex(result.EscapeToken)
-	ast := result.AST
-	total := len(ast.Children)
-
-	for i, n := range ast.Children {
-		opts := dispatchOptions{
-			state:   dispatchState,
-			stepMsg: formatStep(i, total),
-			node:    n,
-			shlex:   shlex,
-			source:  source,
-		}
-		if _, err := b.dispatch(opts); err != nil {
-			return nil, err
-		}
-	}
-	return dispatchState.runConfig, nil
+	return result
 }
diff --git a/builder/dockerfile/builder_unix.go b/builder/dockerfile/builder_unix.go
index 5ea63da..91896af 100644
--- a/builder/dockerfile/builder_unix.go
+++ b/builder/dockerfile/builder_unix.go
@@ -2,6 +2,6 @@
 
 package dockerfile
 
-func defaultShellForPlatform(platform string) []string {
+func defaultShellForOS(os string) []string {
 	return []string{"/bin/sh", "-c"}
 }
diff --git a/builder/dockerfile/builder_windows.go b/builder/dockerfile/builder_windows.go
index 7bfef32..fa573a0 100644
--- a/builder/dockerfile/builder_windows.go
+++ b/builder/dockerfile/builder_windows.go
@@ -1,7 +1,7 @@
 package dockerfile
 
-func defaultShellForPlatform(platform string) []string {
-	if platform == "linux" {
+func defaultShellForOS(os string) []string {
+	if os == "linux" {
 		return []string{"/bin/sh", "-c"}
 	}
 	return []string{"cmd", "/S", "/C"}
diff --git a/builder/dockerfile/clientsession.go b/builder/dockerfile/clientsession.go
index 9a54116..2ef5bf5 100644
--- a/builder/dockerfile/clientsession.go
+++ b/builder/dockerfile/clientsession.go
@@ -41,7 +41,6 @@
 type ClientSessionSourceIdentifier struct {
 	includePatterns []string
 	caller          session.Caller
-	sharedKey       string
 	uuid            string
 }
 
diff --git a/builder/dockerfile/containerbackend.go b/builder/dockerfile/containerbackend.go
index d11c952..add0a87 100644
--- a/builder/dockerfile/containerbackend.go
+++ b/builder/dockerfile/containerbackend.go
@@ -32,7 +32,6 @@
 	container, err := c.backend.ContainerCreate(types.ContainerCreateConfig{
 		Config:     runConfig,
 		HostConfig: hostConfig,
-		Platform:   platform,
 	})
 	if err != nil {
 		return container, err
@@ -103,7 +102,7 @@
 
 func logCancellationError(cancelErrCh chan error, msg string) {
 	if cancelErr := <-cancelErrCh; cancelErr != nil {
-		logrus.Debugf("Build cancelled (%v): ", cancelErr, msg)
+		logrus.Debugf("Build cancelled (%v): %s", cancelErr, msg)
 	}
 }
 
diff --git a/builder/dockerfile/copy.go b/builder/dockerfile/copy.go
index 223623c..92ba006 100644
--- a/builder/dockerfile/copy.go
+++ b/builder/dockerfile/copy.go
@@ -1,12 +1,15 @@
 package dockerfile
 
 import (
+	"archive/tar"
 	"fmt"
 	"io"
+	"mime"
 	"net/http"
 	"net/url"
 	"os"
 	"path/filepath"
+	"runtime"
 	"sort"
 	"strings"
 	"time"
@@ -14,16 +17,18 @@
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builder/remotecontext"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/progress"
 	"github.com/docker/docker/pkg/streamformatter"
-	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/pkg/errors"
 )
 
+const unnamedFilename = "__unnamed__"
+
 type pathCache interface {
 	Load(key interface{}) (value interface{}, ok bool)
 	Store(key, value interface{})
@@ -32,14 +37,14 @@
 // copyInfo is a data object which stores the metadata about each source file in
 // a copyInstruction
 type copyInfo struct {
-	root         string
+	root         containerfs.ContainerFS
 	path         string
 	hash         string
 	noDecompress bool
 }
 
 func (c copyInfo) fullPath() (string, error) {
-	return symlink.FollowSymlinkInScope(filepath.Join(c.root, c.path), c.root)
+	return c.root.ResolveScopedPath(c.path, true)
 }
 
 func newCopyInfoFromSource(source builder.Source, path string, hash string) copyInfo {
@@ -68,6 +73,7 @@
 	pathCache   pathCache
 	download    sourceDownloader
 	tmpPaths    []string
+	platform    string
 }
 
 func copierFromDispatchRequest(req dispatchRequest, download sourceDownloader, imageSource *imageMount) copier {
@@ -76,6 +82,7 @@
 		pathCache:   req.builder.pathCache,
 		download:    download,
 		imageSource: imageSource,
+		platform:    req.builder.options.Platform,
 	}
 }
 
@@ -83,14 +90,14 @@
 	inst := copyInstruction{cmdName: cmdName}
 	last := len(args) - 1
 
-	// Work in daemon-specific filepath semantics
-	inst.dest = filepath.FromSlash(args[last])
-
-	infos, err := o.getCopyInfosForSourcePaths(args[0:last])
+	// Work in platform-specific filepath semantics
+	inst.dest = fromSlash(args[last], o.platform)
+	separator := string(separator(o.platform))
+	infos, err := o.getCopyInfosForSourcePaths(args[0:last], inst.dest)
 	if err != nil {
 		return inst, errors.Wrapf(err, "%s failed", cmdName)
 	}
-	if len(infos) > 1 && !strings.HasSuffix(inst.dest, string(os.PathSeparator)) {
+	if len(infos) > 1 && !strings.HasSuffix(inst.dest, separator) {
 		return inst, errors.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName)
 	}
 	inst.infos = infos
@@ -99,10 +106,11 @@
 
 // getCopyInfosForSourcePaths iterates over the source files and calculate the info
 // needed to copy (e.g. hash value if cached)
-func (o *copier) getCopyInfosForSourcePaths(sources []string) ([]copyInfo, error) {
+// The dest is used when the source is a URL and the filename cannot be determined from it (URL ends with "/")
+func (o *copier) getCopyInfosForSourcePaths(sources []string, dest string) ([]copyInfo, error) {
 	var infos []copyInfo
 	for _, orig := range sources {
-		subinfos, err := o.getCopyInfoForSourcePath(orig)
+		subinfos, err := o.getCopyInfoForSourcePath(orig, dest)
 		if err != nil {
 			return nil, err
 		}
@@ -115,15 +123,24 @@
 	return infos, nil
 }
 
-func (o *copier) getCopyInfoForSourcePath(orig string) ([]copyInfo, error) {
+func (o *copier) getCopyInfoForSourcePath(orig, dest string) ([]copyInfo, error) {
 	if !urlutil.IsURL(orig) {
 		return o.calcCopyInfo(orig, true)
 	}
+
 	remote, path, err := o.download(orig)
 	if err != nil {
 		return nil, err
 	}
-	o.tmpPaths = append(o.tmpPaths, remote.Root())
+	// If path == "" then we are unable to determine filename from src
+	// We have to make sure dest is available
+	if path == "" {
+		if strings.HasSuffix(dest, "/") {
+			return nil, errors.Errorf("cannot determine filename for source %s", orig)
+		}
+		path = unnamedFilename
+	}
+	o.tmpPaths = append(o.tmpPaths, remote.Root().Path())
 
 	hash, err := remote.Hash(path)
 	ci := newCopyInfoFromSource(remote, path, hash)
@@ -143,14 +160,6 @@
 // TODO: allowWildcards can probably be removed by refactoring this function further.
 func (o *copier) calcCopyInfo(origPath string, allowWildcards bool) ([]copyInfo, error) {
 	imageSource := o.imageSource
-	if err := validateCopySourcePath(imageSource, origPath); err != nil {
-		return nil, err
-	}
-
-	// Work in daemon-specific OS filepath semantics
-	origPath = filepath.FromSlash(origPath)
-	origPath = strings.TrimPrefix(origPath, string(os.PathSeparator))
-	origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator))
 
 	// TODO: do this when creating copier. Requires validateCopySourcePath
 	// (and other below) to be aware of the difference sources. Why is it only
@@ -167,8 +176,20 @@
 		return nil, errors.Errorf("missing build context")
 	}
 
+	root := o.source.Root()
+
+	if err := validateCopySourcePath(imageSource, origPath, root.OS()); err != nil {
+		return nil, err
+	}
+
+	// Work in source OS specific filepath semantics
+	// For LCOW, this is NOT the daemon OS.
+	origPath = root.FromSlash(origPath)
+	origPath = strings.TrimPrefix(origPath, string(root.Separator()))
+	origPath = strings.TrimPrefix(origPath, "."+string(root.Separator()))
+
 	// Deal with wildcards
-	if allowWildcards && containsWildcards(origPath) {
+	if allowWildcards && containsWildcards(origPath, root.OS()) {
 		return o.copyWithWildcards(origPath)
 	}
 
@@ -200,6 +221,19 @@
 	return newCopyInfos(newCopyInfoFromSource(o.source, origPath, hash)), nil
 }
 
+func containsWildcards(name, platform string) bool {
+	isWindows := platform == "windows"
+	for i := 0; i < len(name); i++ {
+		ch := name[i]
+		if ch == '\\' && !isWindows {
+			i++
+		} else if ch == '*' || ch == '?' || ch == '[' {
+			return true
+		}
+	}
+	return false
+}
+
 func (o *copier) storeInPathCache(im *imageMount, path string, hash string) {
 	if im != nil {
 		o.pathCache.Store(im.ImageID()+path, hash)
@@ -207,12 +241,13 @@
 }
 
 func (o *copier) copyWithWildcards(origPath string) ([]copyInfo, error) {
+	root := o.source.Root()
 	var copyInfos []copyInfo
-	if err := filepath.Walk(o.source.Root(), func(path string, info os.FileInfo, err error) error {
+	if err := root.Walk(root.Path(), func(path string, info os.FileInfo, err error) error {
 		if err != nil {
 			return err
 		}
-		rel, err := remotecontext.Rel(o.source.Root(), path)
+		rel, err := remotecontext.Rel(root, path)
 		if err != nil {
 			return err
 		}
@@ -220,7 +255,7 @@
 		if rel == "." {
 			return nil
 		}
-		if match, _ := filepath.Match(origPath, rel); !match {
+		if match, _ := root.Match(origPath, rel); !match {
 			return nil
 		}
 
@@ -262,7 +297,7 @@
 	}
 	// Must be a dir
 	var subfiles []string
-	err = filepath.Walk(fp, func(path string, info os.FileInfo, err error) error {
+	err = source.Root().Walk(fp, func(path string, info os.FileInfo, err error) error {
 		if err != nil {
 			return err
 		}
@@ -301,22 +336,40 @@
 	return nil, "", errors.New("source can't be a URL for COPY")
 }
 
+func getFilenameForDownload(path string, resp *http.Response) string {
+	// Guess filename based on source
+	if path != "" && !strings.HasSuffix(path, "/") {
+		if filename := filepath.Base(filepath.FromSlash(path)); filename != "" {
+			return filename
+		}
+	}
+
+	// Guess filename based on Content-Disposition
+	if contentDisposition := resp.Header.Get("Content-Disposition"); contentDisposition != "" {
+		if _, params, err := mime.ParseMediaType(contentDisposition); err == nil {
+			if params["filename"] != "" && !strings.HasSuffix(params["filename"], "/") {
+				if filename := filepath.Base(filepath.FromSlash(params["filename"])); filename != "" {
+					return filename
+				}
+			}
+		}
+	}
+	return ""
+}
+
 func downloadSource(output io.Writer, stdout io.Writer, srcURL string) (remote builder.Source, p string, err error) {
 	u, err := url.Parse(srcURL)
 	if err != nil {
 		return
 	}
-	filename := filepath.Base(filepath.FromSlash(u.Path)) // Ensure in platform semantics
-	if filename == "" {
-		err = errors.Errorf("cannot determine filename from url: %s", u)
-		return
-	}
 
 	resp, err := remotecontext.GetWithStatusError(srcURL)
 	if err != nil {
 		return
 	}
 
+	filename := getFilenameForDownload(u.Path, resp)
+
 	// Prepare file in a tmp dir
 	tmpDir, err := ioutils.TempDir("", "docker-remote")
 	if err != nil {
@@ -327,7 +380,13 @@
 			os.RemoveAll(tmpDir)
 		}
 	}()
-	tmpFileName := filepath.Join(tmpDir, filename)
+	// If filename is empty, the returned filename will be "" but
+	// the tmp filename will be created as "__unnamed__"
+	tmpFileName := filename
+	if filename == "" {
+		tmpFileName = unnamedFilename
+	}
+	tmpFileName = filepath.Join(tmpDir, tmpFileName)
 	tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
 	if err != nil {
 		return
@@ -363,14 +422,19 @@
 		return
 	}
 
-	lc, err := remotecontext.NewLazySource(tmpDir)
+	lc, err := remotecontext.NewLazySource(containerfs.NewLocalContainerFS(tmpDir))
 	return lc, filename, err
 }
 
 type copyFileOptions struct {
 	decompress bool
-	archiver   *archive.Archiver
 	chownPair  idtools.IDPair
+	archiver   Archiver
+}
+
+type copyEndpoint struct {
+	driver containerfs.Driver
+	path   string
 }
 
 func performCopyForInfo(dest copyInfo, source copyInfo, options copyFileOptions) error {
@@ -378,6 +442,7 @@
 	if err != nil {
 		return err
 	}
+
 	destPath, err := dest.fullPath()
 	if err != nil {
 		return err
@@ -385,59 +450,90 @@
 
 	archiver := options.archiver
 
-	src, err := os.Stat(srcPath)
+	srcEndpoint := &copyEndpoint{driver: source.root, path: srcPath}
+	destEndpoint := &copyEndpoint{driver: dest.root, path: destPath}
+
+	src, err := source.root.Stat(srcPath)
 	if err != nil {
 		return errors.Wrapf(err, "source path not found")
 	}
 	if src.IsDir() {
-		return copyDirectory(archiver, srcPath, destPath, options.chownPair)
+		return copyDirectory(archiver, srcEndpoint, destEndpoint, options.chownPair)
 	}
-	if options.decompress && archive.IsArchivePath(srcPath) && !source.noDecompress {
+	if options.decompress && isArchivePath(source.root, srcPath) && !source.noDecompress {
 		return archiver.UntarPath(srcPath, destPath)
 	}
 
-	destExistsAsDir, err := isExistingDirectory(destPath)
+	destExistsAsDir, err := isExistingDirectory(destEndpoint)
 	if err != nil {
 		return err
 	}
 	// dest.path must be used because destPath has already been cleaned of any
 	// trailing slash
-	if endsInSlash(dest.path) || destExistsAsDir {
+	if endsInSlash(dest.root, dest.path) || destExistsAsDir {
 		// source.path must be used to get the correct filename when the source
 		// is a symlink
-		destPath = filepath.Join(destPath, filepath.Base(source.path))
+		destPath = dest.root.Join(destPath, source.root.Base(source.path))
+		destEndpoint = &copyEndpoint{driver: dest.root, path: destPath}
 	}
-	return copyFile(archiver, srcPath, destPath, options.chownPair)
+	return copyFile(archiver, srcEndpoint, destEndpoint, options.chownPair)
 }
 
-func copyDirectory(archiver *archive.Archiver, source, dest string, chownPair idtools.IDPair) error {
+func isArchivePath(driver containerfs.ContainerFS, path string) bool {
+	file, err := driver.Open(path)
+	if err != nil {
+		return false
+	}
+	defer file.Close()
+	rdr, err := archive.DecompressStream(file)
+	if err != nil {
+		return false
+	}
+	r := tar.NewReader(rdr)
+	_, err = r.Next()
+	return err == nil
+}
+
+func copyDirectory(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
 	destExists, err := isExistingDirectory(dest)
 	if err != nil {
 		return errors.Wrapf(err, "failed to query destination path")
 	}
-	if err := archiver.CopyWithTar(source, dest); err != nil {
+
+	if err := archiver.CopyWithTar(source.path, dest.path); err != nil {
 		return errors.Wrapf(err, "failed to copy directory")
 	}
-	return fixPermissions(source, dest, chownPair, !destExists)
+	// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
+	return fixPermissions(source.path, dest.path, chownPair, !destExists)
 }
 
-func copyFile(archiver *archive.Archiver, source, dest string, chownPair idtools.IDPair) error {
-	if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest), 0755, chownPair); err != nil {
-		return errors.Wrapf(err, "failed to create new directory")
+func copyFile(archiver Archiver, source, dest *copyEndpoint, chownPair idtools.IDPair) error {
+	if runtime.GOOS == "windows" && dest.driver.OS() == "linux" {
+		// LCOW
+		if err := dest.driver.MkdirAll(dest.driver.Dir(dest.path), 0755); err != nil {
+			return errors.Wrapf(err, "failed to create new directory")
+		}
+	} else {
+		if err := idtools.MkdirAllAndChownNew(filepath.Dir(dest.path), 0755, chownPair); err != nil {
+			// Normal containers
+			return errors.Wrapf(err, "failed to create new directory")
+		}
 	}
-	if err := archiver.CopyFileWithTar(source, dest); err != nil {
+
+	if err := archiver.CopyFileWithTar(source.path, dest.path); err != nil {
 		return errors.Wrapf(err, "failed to copy file")
 	}
-	return fixPermissions(source, dest, chownPair, false)
+	// TODO: @gupta-ak. Investigate how LCOW permission mappings will work.
+	return fixPermissions(source.path, dest.path, chownPair, false)
 }
 
-func endsInSlash(path string) bool {
-	return strings.HasSuffix(path, string(os.PathSeparator))
+func endsInSlash(driver containerfs.Driver, path string) bool {
+	return strings.HasSuffix(path, string(driver.Separator()))
 }
 
 // isExistingDirectory returns true if the path exists and is a directory
-func isExistingDirectory(path string) (bool, error) {
-	destStat, err := os.Stat(path)
+func isExistingDirectory(point *copyEndpoint) (bool, error) {
+	destStat, err := point.driver.Stat(point.path)
 	switch {
 	case os.IsNotExist(err):
 		return false, nil
diff --git a/builder/dockerfile/copy_test.go b/builder/dockerfile/copy_test.go
index ed384fd..87c5675 100644
--- a/builder/dockerfile/copy_test.go
+++ b/builder/dockerfile/copy_test.go
@@ -1,8 +1,10 @@
 package dockerfile
 
 import (
+	"net/http"
 	"testing"
 
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/gotestyourself/gotestyourself/fs"
 	"github.com/stretchr/testify/assert"
 )
@@ -36,10 +38,110 @@
 	}
 
 	for _, testcase := range testcases {
-		result, err := isExistingDirectory(testcase.path)
+		result, err := isExistingDirectory(&copyEndpoint{driver: containerfs.NewLocalDriver(), path: testcase.path})
 		if !assert.NoError(t, err) {
 			continue
 		}
 		assert.Equal(t, testcase.expected, result, testcase.doc)
 	}
 }
+
+func TestGetFilenameForDownload(t *testing.T) {
+	var testcases = []struct {
+		path        string
+		disposition string
+		expected    string
+	}{
+		{
+			path:     "http://www.example.com/",
+			expected: "",
+		},
+		{
+			path:     "http://www.example.com/xyz",
+			expected: "xyz",
+		},
+		{
+			path:     "http://www.example.com/xyz.html",
+			expected: "xyz.html",
+		},
+		{
+			path:     "http://www.example.com/xyz/",
+			expected: "",
+		},
+		{
+			path:     "http://www.example.com/xyz/uvw",
+			expected: "uvw",
+		},
+		{
+			path:     "http://www.example.com/xyz/uvw.html",
+			expected: "uvw.html",
+		},
+		{
+			path:     "http://www.example.com/xyz/uvw/",
+			expected: "",
+		},
+		{
+			path:     "/",
+			expected: "",
+		},
+		{
+			path:     "/xyz",
+			expected: "xyz",
+		},
+		{
+			path:     "/xyz.html",
+			expected: "xyz.html",
+		},
+		{
+			path:     "/xyz/",
+			expected: "",
+		},
+		{
+			path:        "/xyz/",
+			disposition: "attachment; filename=xyz.html",
+			expected:    "xyz.html",
+		},
+		{
+			disposition: "",
+			expected:    "",
+		},
+		{
+			disposition: "attachment; filename=xyz",
+			expected:    "xyz",
+		},
+		{
+			disposition: "attachment; filename=xyz.html",
+			expected:    "xyz.html",
+		},
+		{
+			disposition: "attachment; filename=\"xyz\"",
+			expected:    "xyz",
+		},
+		{
+			disposition: "attachment; filename=\"xyz.html\"",
+			expected:    "xyz.html",
+		},
+		{
+			disposition: "attachment; filename=\"/xyz.html\"",
+			expected:    "xyz.html",
+		},
+		{
+			disposition: "attachment; filename=\"/xyz/uvw\"",
+			expected:    "uvw",
+		},
+		{
+			disposition: "attachment; filename=\"Naïve file.txt\"",
+			expected:    "Naïve file.txt",
+		},
+	}
+	for _, testcase := range testcases {
+		resp := http.Response{
+			Header: make(map[string][]string),
+		}
+		if testcase.disposition != "" {
+			resp.Header.Add("Content-Disposition", testcase.disposition)
+		}
+		filename := getFilenameForDownload(testcase.path, &resp)
+		assert.Equal(t, testcase.expected, filename)
+	}
+}
diff --git a/builder/dockerfile/copy_unix.go b/builder/dockerfile/copy_unix.go
index a4a5e05..8833700 100644
--- a/builder/dockerfile/copy_unix.go
+++ b/builder/dockerfile/copy_unix.go
@@ -6,6 +6,7 @@
 	"os"
 	"path/filepath"
 
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 )
 
@@ -15,7 +16,8 @@
 		err           error
 	)
 	if !overrideSkip {
-		skipChownRoot, err = isExistingDirectory(destination)
+		destEndpoint := &copyEndpoint{driver: containerfs.NewLocalDriver(), path: destination}
+		skipChownRoot, err = isExistingDirectory(destEndpoint)
 		if err != nil {
 			return err
 		}
@@ -40,3 +42,7 @@
 		return os.Lchown(fullpath, rootIDs.UID, rootIDs.GID)
 	})
 }
+
+func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error {
+	return nil
+}
diff --git a/builder/dockerfile/copy_windows.go b/builder/dockerfile/copy_windows.go
index e4b15bc..f7fc6e0 100644
--- a/builder/dockerfile/copy_windows.go
+++ b/builder/dockerfile/copy_windows.go
@@ -1,8 +1,43 @@
 package dockerfile
 
-import "github.com/docker/docker/pkg/idtools"
+import (
+	"errors"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/pkg/idtools"
+)
+
+var pathBlacklist = map[string]bool{
+	"c:\\":        true,
+	"c:\\windows": true,
+}
 
 func fixPermissions(source, destination string, rootIDs idtools.IDPair, overrideSkip bool) error {
 	// chown is not supported on Windows
 	return nil
 }
+
+func validateCopySourcePath(imageSource *imageMount, origPath, platform string) error {
+	// validate windows paths from other images + LCOW
+	if imageSource == nil || platform != "windows" {
+		return nil
+	}
+
+	origPath = filepath.FromSlash(origPath)
+	p := strings.ToLower(filepath.Clean(origPath))
+	if !filepath.IsAbs(p) {
+		if filepath.VolumeName(p) != "" {
+			if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
+				p = p[:len(p)-1]
+			}
+			p += "\\"
+		} else {
+			p = filepath.Join("c:\\", p)
+		}
+	}
+	if _, blacklisted := pathBlacklist[p]; blacklisted {
+		return errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
+	}
+	return nil
+}
diff --git a/builder/dockerfile/dispatchers.go b/builder/dockerfile/dispatchers.go
index d8a8358..8d3ea7e 100644
--- a/builder/dockerfile/dispatchers.go
+++ b/builder/dockerfile/dispatchers.go
@@ -10,17 +10,15 @@
 import (
 	"bytes"
 	"fmt"
-	"regexp"
 	"runtime"
 	"sort"
-	"strconv"
 	"strings"
-	"time"
 
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/strslice"
 	"github.com/docker/docker/builder"
+	"github.com/docker/docker/builder/dockerfile/instructions"
 	"github.com/docker/docker/builder/dockerfile/parser"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/pkg/jsonmessage"
@@ -36,32 +34,14 @@
 // Sets the environment variable foo to bar, also makes interpolation
 // in the dockerfile available from the next statement on via ${foo}.
 //
-func env(req dispatchRequest) error {
-	if len(req.args) == 0 {
-		return errAtLeastOneArgument("ENV")
-	}
-
-	if len(req.args)%2 != 0 {
-		// should never get here, but just in case
-		return errTooManyArguments("ENV")
-	}
-
-	if err := req.flags.Parse(); err != nil {
-		return err
-	}
-
-	runConfig := req.state.runConfig
+func dispatchEnv(d dispatchRequest, c *instructions.EnvCommand) error {
+	runConfig := d.state.runConfig
 	commitMessage := bytes.NewBufferString("ENV")
+	for _, e := range c.Env {
+		name := e.Key
+		newVar := e.String()
 
-	for j := 0; j < len(req.args); j += 2 {
-		if len(req.args[j]) == 0 {
-			return errBlankCommandNames("ENV")
-		}
-		name := req.args[j]
-		value := req.args[j+1]
-		newVar := name + "=" + value
 		commitMessage.WriteString(" " + newVar)
-
 		gotOne := false
 		for i, envVar := range runConfig.Env {
 			envParts := strings.SplitN(envVar, "=", 2)
@@ -76,64 +56,32 @@
 			runConfig.Env = append(runConfig.Env, newVar)
 		}
 	}
-
-	return req.builder.commit(req.state, commitMessage.String())
+	return d.builder.commit(d.state, commitMessage.String())
 }
 
 // MAINTAINER some text <maybe@an.email.address>
 //
 // Sets the maintainer metadata.
-func maintainer(req dispatchRequest) error {
-	if len(req.args) != 1 {
-		return errExactlyOneArgument("MAINTAINER")
-	}
+func dispatchMaintainer(d dispatchRequest, c *instructions.MaintainerCommand) error {
 
-	if err := req.flags.Parse(); err != nil {
-		return err
-	}
-
-	maintainer := req.args[0]
-	req.state.maintainer = maintainer
-	return req.builder.commit(req.state, "MAINTAINER "+maintainer)
+	d.state.maintainer = c.Maintainer
+	return d.builder.commit(d.state, "MAINTAINER "+c.Maintainer)
 }
 
 // LABEL some json data describing the image
 //
 // Sets the Label variable foo to bar,
 //
-func label(req dispatchRequest) error {
-	if len(req.args) == 0 {
-		return errAtLeastOneArgument("LABEL")
+func dispatchLabel(d dispatchRequest, c *instructions.LabelCommand) error {
+	if d.state.runConfig.Labels == nil {
+		d.state.runConfig.Labels = make(map[string]string)
 	}
-	if len(req.args)%2 != 0 {
-		// should never get here, but just in case
-		return errTooManyArguments("LABEL")
-	}
-
-	if err := req.flags.Parse(); err != nil {
-		return err
-	}
-
 	commitStr := "LABEL"
-	runConfig := req.state.runConfig
-
-	if runConfig.Labels == nil {
-		runConfig.Labels = map[string]string{}
+	for _, v := range c.Labels {
+		d.state.runConfig.Labels[v.Key] = v.Value
+		commitStr += " " + v.String()
 	}
-
-	for j := 0; j < len(req.args); j++ {
-		name := req.args[j]
-		if name == "" {
-			return errBlankCommandNames("LABEL")
-		}
-
-		value := req.args[j+1]
-		commitStr += " " + name + "=" + value
-
-		runConfig.Labels[name] = value
-		j++
-	}
-	return req.builder.commit(req.state, commitStr)
+	return d.builder.commit(d.state, commitStr)
 }
 
 // ADD foo /path
@@ -141,257 +89,179 @@
 // Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
 // exist here. If you do not wish to have this automatic handling, use COPY.
 //
-func add(req dispatchRequest) error {
-	if len(req.args) < 2 {
-		return errAtLeastTwoArguments("ADD")
-	}
-
-	flChown := req.flags.AddString("chown", "")
-	if err := req.flags.Parse(); err != nil {
-		return err
-	}
-
-	downloader := newRemoteSourceDownloader(req.builder.Output, req.builder.Stdout)
-	copier := copierFromDispatchRequest(req, downloader, nil)
+func dispatchAdd(d dispatchRequest, c *instructions.AddCommand) error {
+	downloader := newRemoteSourceDownloader(d.builder.Output, d.builder.Stdout)
+	copier := copierFromDispatchRequest(d, downloader, nil)
 	defer copier.Cleanup()
-	copyInstruction, err := copier.createCopyInstruction(req.args, "ADD")
+
+	copyInstruction, err := copier.createCopyInstruction(c.SourcesAndDest, "ADD")
 	if err != nil {
 		return err
 	}
-	copyInstruction.chownStr = flChown.Value
+	copyInstruction.chownStr = c.Chown
 	copyInstruction.allowLocalDecompression = true
 
-	return req.builder.performCopy(req.state, copyInstruction)
+	return d.builder.performCopy(d.state, copyInstruction)
 }
 
 // COPY foo /path
 //
 // Same as 'ADD' but without the tar and remote url handling.
 //
-func dispatchCopy(req dispatchRequest) error {
-	if len(req.args) < 2 {
-		return errAtLeastTwoArguments("COPY")
+func dispatchCopy(d dispatchRequest, c *instructions.CopyCommand) error {
+	var im *imageMount
+	var err error
+	if c.From != "" {
+		im, err = d.getImageMount(c.From)
+		if err != nil {
+			return errors.Wrapf(err, "invalid from flag value %s", c.From)
+		}
 	}
-
-	flFrom := req.flags.AddString("from", "")
-	flChown := req.flags.AddString("chown", "")
-	if err := req.flags.Parse(); err != nil {
-		return err
-	}
-
-	im, err := req.builder.getImageMount(flFrom)
-	if err != nil {
-		return errors.Wrapf(err, "invalid from flag value %s", flFrom.Value)
-	}
-
-	copier := copierFromDispatchRequest(req, errOnSourceDownload, im)
+	copier := copierFromDispatchRequest(d, errOnSourceDownload, im)
 	defer copier.Cleanup()
-	copyInstruction, err := copier.createCopyInstruction(req.args, "COPY")
+	copyInstruction, err := copier.createCopyInstruction(c.SourcesAndDest, "COPY")
 	if err != nil {
 		return err
 	}
-	copyInstruction.chownStr = flChown.Value
+	copyInstruction.chownStr = c.Chown
 
-	return req.builder.performCopy(req.state, copyInstruction)
+	return d.builder.performCopy(d.state, copyInstruction)
 }
 
-func (b *Builder) getImageMount(fromFlag *Flag) (*imageMount, error) {
-	if !fromFlag.IsUsed() {
+func (d *dispatchRequest) getImageMount(imageRefOrID string) (*imageMount, error) {
+	if imageRefOrID == "" {
 		// TODO: this could return the source in the default case as well?
 		return nil, nil
 	}
 
 	var localOnly bool
-	imageRefOrID := fromFlag.Value
-	stage, err := b.buildStages.get(fromFlag.Value)
+	stage, err := d.stages.get(imageRefOrID)
 	if err != nil {
 		return nil, err
 	}
 	if stage != nil {
-		imageRefOrID = stage.ImageID()
+		imageRefOrID = stage.Image
 		localOnly = true
 	}
-	return b.imageSources.Get(imageRefOrID, localOnly)
+	return d.builder.imageSources.Get(imageRefOrID, localOnly)
 }
 
 // FROM imagename[:tag | @digest] [AS build-stage-name]
 //
-func from(req dispatchRequest) error {
-	stageName, err := parseBuildStageName(req.args)
+func initializeStage(d dispatchRequest, cmd *instructions.Stage) error {
+	d.builder.imageProber.Reset()
+	image, err := d.getFromImage(d.shlex, cmd.BaseName)
 	if err != nil {
 		return err
 	}
-
-	if err := req.flags.Parse(); err != nil {
-		return err
+	state := d.state
+	state.beginStage(cmd.Name, image)
+	if len(state.runConfig.OnBuild) > 0 {
+		triggers := state.runConfig.OnBuild
+		state.runConfig.OnBuild = nil
+		return dispatchTriggeredOnBuild(d, triggers)
 	}
-
-	req.builder.imageProber.Reset()
-	image, err := req.builder.getFromImage(req.shlex, req.args[0])
-	if err != nil {
-		return err
-	}
-	if err := req.builder.buildStages.add(stageName, image); err != nil {
-		return err
-	}
-	req.state.beginStage(stageName, image)
-	req.builder.buildArgs.ResetAllowed()
-	if image.ImageID() == "" {
-		// Typically this means they used "FROM scratch"
-		return nil
-	}
-
-	return processOnBuild(req)
+	return nil
 }
 
-func parseBuildStageName(args []string) (string, error) {
-	stageName := ""
-	switch {
-	case len(args) == 3 && strings.EqualFold(args[1], "as"):
-		stageName = strings.ToLower(args[2])
-		if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok {
-			return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", stageName)
-		}
-	case len(args) != 1:
-		return "", errors.New("FROM requires either one or three arguments")
+func dispatchTriggeredOnBuild(d dispatchRequest, triggers []string) error {
+	fmt.Fprintf(d.builder.Stdout, "# Executing %d build trigger", len(triggers))
+	if len(triggers) > 1 {
+		fmt.Fprint(d.builder.Stdout, "s")
 	}
-
-	return stageName, nil
-}
-
-// scratchImage is used as a token for the empty base image.
-var scratchImage builder.Image = &image.Image{}
-
-func (b *Builder) getFromImage(shlex *ShellLex, name string) (builder.Image, error) {
-	substitutionArgs := []string{}
-	for key, value := range b.buildArgs.GetAllMeta() {
-		substitutionArgs = append(substitutionArgs, key+"="+value)
-	}
-
-	name, err := shlex.ProcessWord(name, substitutionArgs)
-	if err != nil {
-		return nil, err
-	}
-
-	var localOnly bool
-	if stage, ok := b.buildStages.getByName(name); ok {
-		name = stage.ImageID()
-		localOnly = true
-	}
-
-	// Windows cannot support a container with no base image unless it is LCOW.
-	if name == api.NoBaseImageSpecifier {
-		if runtime.GOOS == "windows" {
-			if b.platform == "windows" || (b.platform != "windows" && !system.LCOWSupported()) {
-				return nil, errors.New("Windows does not support FROM scratch")
-			}
-		}
-		return scratchImage, nil
-	}
-	imageMount, err := b.imageSources.Get(name, localOnly)
-	if err != nil {
-		return nil, err
-	}
-	return imageMount.Image(), nil
-}
-
-func processOnBuild(req dispatchRequest) error {
-	dispatchState := req.state
-	// Process ONBUILD triggers if they exist
-	if nTriggers := len(dispatchState.runConfig.OnBuild); nTriggers != 0 {
-		word := "trigger"
-		if nTriggers > 1 {
-			word = "triggers"
-		}
-		fmt.Fprintf(req.builder.Stderr, "# Executing %d build %s...\n", nTriggers, word)
-	}
-
-	// Copy the ONBUILD triggers, and remove them from the config, since the config will be committed.
-	onBuildTriggers := dispatchState.runConfig.OnBuild
-	dispatchState.runConfig.OnBuild = []string{}
-
-	// Reset stdin settings as all build actions run without stdin
-	dispatchState.runConfig.OpenStdin = false
-	dispatchState.runConfig.StdinOnce = false
-
-	// parse the ONBUILD triggers by invoking the parser
-	for _, step := range onBuildTriggers {
-		dockerfile, err := parser.Parse(strings.NewReader(step))
+	fmt.Fprintln(d.builder.Stdout)
+	for _, trigger := range triggers {
+		d.state.updateRunConfig()
+		ast, err := parser.Parse(strings.NewReader(trigger))
 		if err != nil {
 			return err
 		}
-
-		for _, n := range dockerfile.AST.Children {
-			if err := checkDispatch(n); err != nil {
-				return err
-			}
-
-			upperCasedCmd := strings.ToUpper(n.Value)
-			switch upperCasedCmd {
-			case "ONBUILD":
-				return errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
-			case "MAINTAINER", "FROM":
-				return errors.Errorf("%s isn't allowed as an ONBUILD trigger", upperCasedCmd)
-			}
+		if len(ast.AST.Children) != 1 {
+			return errors.New("onbuild trigger should be a single expression")
 		}
-
-		if _, err := dispatchFromDockerfile(req.builder, dockerfile, dispatchState, req.source); err != nil {
+		cmd, err := instructions.ParseCommand(ast.AST.Children[0])
+		if err != nil {
+			if instructions.IsUnknownInstruction(err) {
+				buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
+			}
+			return err
+		}
+		err = dispatch(d, cmd)
+		if err != nil {
 			return err
 		}
 	}
 	return nil
 }
 
-// ONBUILD RUN echo yo
-//
-// ONBUILD triggers run when the image is used in a FROM statement.
-//
-// ONBUILD handling has a lot of special-case functionality, the heading in
-// evaluator.go and comments around dispatch() in the same file explain the
-// special cases. search for 'OnBuild' in internals.go for additional special
-// cases.
-//
-func onbuild(req dispatchRequest) error {
-	if len(req.args) == 0 {
-		return errAtLeastOneArgument("ONBUILD")
+func (d *dispatchRequest) getExpandedImageName(shlex *ShellLex, name string) (string, error) {
+	substitutionArgs := []string{}
+	for key, value := range d.state.buildArgs.GetAllMeta() {
+		substitutionArgs = append(substitutionArgs, key+"="+value)
 	}
 
-	if err := req.flags.Parse(); err != nil {
-		return err
+	name, err := shlex.ProcessWord(name, substitutionArgs)
+	if err != nil {
+		return "", err
+	}
+	return name, nil
+}
+func (d *dispatchRequest) getImageOrStage(name string) (builder.Image, error) {
+	var localOnly bool
+	if im, ok := d.stages.getByName(name); ok {
+		name = im.Image
+		localOnly = true
 	}
 
-	triggerInstruction := strings.ToUpper(strings.TrimSpace(req.args[0]))
-	switch triggerInstruction {
-	case "ONBUILD":
-		return errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
-	case "MAINTAINER", "FROM":
-		return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
+	// Windows cannot support a container with no base image unless it is LCOW.
+	if name == api.NoBaseImageSpecifier {
+		imageImage := &image.Image{}
+		imageImage.OS = runtime.GOOS
+		if runtime.GOOS == "windows" {
+			optionsOS := system.ParsePlatform(d.builder.options.Platform).OS
+			switch optionsOS {
+			case "windows", "":
+				return nil, errors.New("Windows does not support FROM scratch")
+			case "linux":
+				if !system.LCOWSupported() {
+					return nil, errors.New("Linux containers are not supported on this system")
+				}
+				imageImage.OS = "linux"
+			default:
+				return nil, errors.Errorf("operating system %q is not supported", optionsOS)
+			}
+		}
+		return builder.Image(imageImage), nil
 	}
+	imageMount, err := d.builder.imageSources.Get(name, localOnly)
+	if err != nil {
+		return nil, err
+	}
+	return imageMount.Image(), nil
+}
+func (d *dispatchRequest) getFromImage(shlex *ShellLex, name string) (builder.Image, error) {
+	name, err := d.getExpandedImageName(shlex, name)
+	if err != nil {
+		return nil, err
+	}
+	return d.getImageOrStage(name)
+}
 
-	runConfig := req.state.runConfig
-	original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "")
-	runConfig.OnBuild = append(runConfig.OnBuild, original)
-	return req.builder.commit(req.state, "ONBUILD "+original)
+func dispatchOnbuild(d dispatchRequest, c *instructions.OnbuildCommand) error {
+
+	d.state.runConfig.OnBuild = append(d.state.runConfig.OnBuild, c.Expression)
+	return d.builder.commit(d.state, "ONBUILD "+c.Expression)
 }
 
 // WORKDIR /tmp
 //
 // Set the working directory for future RUN/CMD/etc statements.
 //
-func workdir(req dispatchRequest) error {
-	if len(req.args) != 1 {
-		return errExactlyOneArgument("WORKDIR")
-	}
-
-	err := req.flags.Parse()
-	if err != nil {
-		return err
-	}
-
-	runConfig := req.state.runConfig
-	// This is from the Dockerfile and will not necessarily be in platform
-	// specific semantics, hence ensure it is converted.
-	runConfig.WorkingDir, err = normalizeWorkdir(req.builder.platform, runConfig.WorkingDir, req.args[0])
+func dispatchWorkdir(d dispatchRequest, c *instructions.WorkdirCommand) error {
+	runConfig := d.state.runConfig
+	var err error
+	optionsOS := system.ParsePlatform(d.builder.options.Platform).OS
+	runConfig.WorkingDir, err = normalizeWorkdir(optionsOS, runConfig.WorkingDir, c.Path)
 	if err != nil {
 		return err
 	}
@@ -400,23 +270,31 @@
 	// This avoids having an unnecessary expensive mount/unmount calls
 	// (on Windows in particular) during each container create.
 	// Prior to 1.13, the mkdir was deferred and not executed at this step.
-	if req.builder.disableCommit {
+	if d.builder.disableCommit {
 		// Don't call back into the daemon if we're going through docker commit --change "WORKDIR /foo".
 		// We've already updated the runConfig and that's enough.
 		return nil
 	}
 
 	comment := "WORKDIR " + runConfig.WorkingDir
-	runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment, req.builder.platform))
-	containerID, err := req.builder.probeAndCreate(req.state, runConfigWithCommentCmd)
+	runConfigWithCommentCmd := copyRunConfig(runConfig, withCmdCommentString(comment, optionsOS))
+	containerID, err := d.builder.probeAndCreate(d.state, runConfigWithCommentCmd)
 	if err != nil || containerID == "" {
 		return err
 	}
-	if err := req.builder.docker.ContainerCreateWorkdir(containerID); err != nil {
+	if err := d.builder.docker.ContainerCreateWorkdir(containerID); err != nil {
 		return err
 	}
 
-	return req.builder.commitContainer(req.state, containerID, runConfigWithCommentCmd)
+	return d.builder.commitContainer(d.state, containerID, runConfigWithCommentCmd)
+}
+
+func resolveCmdLine(cmd instructions.ShellDependantCmdLine, runConfig *container.Config, platform string) []string {
+	result := cmd.CmdLine
+	if cmd.PrependShell && result != nil {
+		result = append(getShell(runConfig, platform), result...)
+	}
+	return result
 }
 
 // RUN some command yo
@@ -429,32 +307,22 @@
 // RUN echo hi          # cmd /S /C echo hi   (Windows)
 // RUN [ "echo", "hi" ] # echo hi
 //
-func run(req dispatchRequest) error {
-	if !req.state.hasFromImage() {
-		return errors.New("Please provide a source image with `from` prior to run")
-	}
+func dispatchRun(d dispatchRequest, c *instructions.RunCommand) error {
 
-	if err := req.flags.Parse(); err != nil {
-		return err
-	}
-
-	stateRunConfig := req.state.runConfig
-	args := handleJSONArgs(req.args, req.attributes)
-	if !req.attributes["json"] {
-		args = append(getShell(stateRunConfig, req.builder.platform), args...)
-	}
-	cmdFromArgs := strslice.StrSlice(args)
-	buildArgs := req.builder.buildArgs.FilterAllowed(stateRunConfig.Env)
+	stateRunConfig := d.state.runConfig
+	optionsOS := system.ParsePlatform(d.builder.options.Platform).OS
+	cmdFromArgs := resolveCmdLine(c.ShellDependantCmdLine, stateRunConfig, optionsOS)
+	buildArgs := d.state.buildArgs.FilterAllowed(stateRunConfig.Env)
 
 	saveCmd := cmdFromArgs
 	if len(buildArgs) > 0 {
-		saveCmd = prependEnvOnCmd(req.builder.buildArgs, buildArgs, cmdFromArgs)
+		saveCmd = prependEnvOnCmd(d.state.buildArgs, buildArgs, cmdFromArgs)
 	}
 
 	runConfigForCacheProbe := copyRunConfig(stateRunConfig,
 		withCmd(saveCmd),
 		withEntrypointOverride(saveCmd, nil))
-	hit, err := req.builder.probeCache(req.state, runConfigForCacheProbe)
+	hit, err := d.builder.probeCache(d.state, runConfigForCacheProbe)
 	if err != nil || hit {
 		return err
 	}
@@ -468,11 +336,11 @@
 	runConfig.ArgsEscaped = true
 
 	logrus.Debugf("[BUILDER] Command to be executed: %v", runConfig.Cmd)
-	cID, err := req.builder.create(runConfig)
+	cID, err := d.builder.create(runConfig)
 	if err != nil {
 		return err
 	}
-	if err := req.builder.containerManager.Run(req.builder.clientCtx, cID, req.builder.Stdout, req.builder.Stderr); err != nil {
+	if err := d.builder.containerManager.Run(d.builder.clientCtx, cID, d.builder.Stdout, d.builder.Stderr); err != nil {
 		if err, ok := err.(*statusCodeError); ok {
 			// TODO: change error type, because jsonmessage.JSONError assumes HTTP
 			return &jsonmessage.JSONError{
@@ -485,7 +353,7 @@
 		return err
 	}
 
-	return req.builder.commitContainer(req.state, cID, runConfigForCacheProbe)
+	return d.builder.commitContainer(d.state, cID, runConfigForCacheProbe)
 }
 
 // Derive the command to use for probeCache() and to commit in this container.
@@ -518,139 +386,40 @@
 // Set the default command to run in the container (which may be empty).
 // Argument handling is the same as RUN.
 //
-func cmd(req dispatchRequest) error {
-	if err := req.flags.Parse(); err != nil {
-		return err
-	}
-
-	runConfig := req.state.runConfig
-	cmdSlice := handleJSONArgs(req.args, req.attributes)
-	if !req.attributes["json"] {
-		cmdSlice = append(getShell(runConfig, req.builder.platform), cmdSlice...)
-	}
-
-	runConfig.Cmd = strslice.StrSlice(cmdSlice)
+func dispatchCmd(d dispatchRequest, c *instructions.CmdCommand) error {
+	runConfig := d.state.runConfig
+	optionsOS := system.ParsePlatform(d.builder.options.Platform).OS
+	cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, optionsOS)
+	runConfig.Cmd = cmd
 	// set config as already being escaped, this prevents double escaping on windows
 	runConfig.ArgsEscaped = true
 
-	if err := req.builder.commit(req.state, fmt.Sprintf("CMD %q", cmdSlice)); err != nil {
+	if err := d.builder.commit(d.state, fmt.Sprintf("CMD %q", cmd)); err != nil {
 		return err
 	}
 
-	if len(req.args) != 0 {
-		req.state.cmdSet = true
+	if len(c.ShellDependantCmdLine.CmdLine) != 0 {
+		d.state.cmdSet = true
 	}
 
 	return nil
 }
 
-// parseOptInterval(flag) is the duration of flag.Value, or 0 if
-// empty. An error is reported if the value is given and less than minimum duration.
-func parseOptInterval(f *Flag) (time.Duration, error) {
-	s := f.Value
-	if s == "" {
-		return 0, nil
-	}
-	d, err := time.ParseDuration(s)
-	if err != nil {
-		return 0, err
-	}
-	if d < container.MinimumDuration {
-		return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration)
-	}
-	return d, nil
-}
-
 // HEALTHCHECK foo
 //
 // Set the default healthcheck command to run in the container (which may be empty).
 // Argument handling is the same as RUN.
 //
-func healthcheck(req dispatchRequest) error {
-	if len(req.args) == 0 {
-		return errAtLeastOneArgument("HEALTHCHECK")
+func dispatchHealthcheck(d dispatchRequest, c *instructions.HealthCheckCommand) error {
+	runConfig := d.state.runConfig
+	if runConfig.Healthcheck != nil {
+		oldCmd := runConfig.Healthcheck.Test
+		if len(oldCmd) > 0 && oldCmd[0] != "NONE" {
+			fmt.Fprintf(d.builder.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd)
+		}
 	}
-	runConfig := req.state.runConfig
-	typ := strings.ToUpper(req.args[0])
-	args := req.args[1:]
-	if typ == "NONE" {
-		if len(args) != 0 {
-			return errors.New("HEALTHCHECK NONE takes no arguments")
-		}
-		test := strslice.StrSlice{typ}
-		runConfig.Healthcheck = &container.HealthConfig{
-			Test: test,
-		}
-	} else {
-		if runConfig.Healthcheck != nil {
-			oldCmd := runConfig.Healthcheck.Test
-			if len(oldCmd) > 0 && oldCmd[0] != "NONE" {
-				fmt.Fprintf(req.builder.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd)
-			}
-		}
-
-		healthcheck := container.HealthConfig{}
-
-		flInterval := req.flags.AddString("interval", "")
-		flTimeout := req.flags.AddString("timeout", "")
-		flStartPeriod := req.flags.AddString("start-period", "")
-		flRetries := req.flags.AddString("retries", "")
-
-		if err := req.flags.Parse(); err != nil {
-			return err
-		}
-
-		switch typ {
-		case "CMD":
-			cmdSlice := handleJSONArgs(args, req.attributes)
-			if len(cmdSlice) == 0 {
-				return errors.New("Missing command after HEALTHCHECK CMD")
-			}
-
-			if !req.attributes["json"] {
-				typ = "CMD-SHELL"
-			}
-
-			healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...))
-		default:
-			return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ)
-		}
-
-		interval, err := parseOptInterval(flInterval)
-		if err != nil {
-			return err
-		}
-		healthcheck.Interval = interval
-
-		timeout, err := parseOptInterval(flTimeout)
-		if err != nil {
-			return err
-		}
-		healthcheck.Timeout = timeout
-
-		startPeriod, err := parseOptInterval(flStartPeriod)
-		if err != nil {
-			return err
-		}
-		healthcheck.StartPeriod = startPeriod
-
-		if flRetries.Value != "" {
-			retries, err := strconv.ParseInt(flRetries.Value, 10, 32)
-			if err != nil {
-				return err
-			}
-			if retries < 1 {
-				return fmt.Errorf("--retries must be at least 1 (not %d)", retries)
-			}
-			healthcheck.Retries = int(retries)
-		} else {
-			healthcheck.Retries = 0
-		}
-
-		runConfig.Healthcheck = &healthcheck
-	}
-
-	return req.builder.commit(req.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck))
+	runConfig.Healthcheck = c.Health
+	return d.builder.commit(d.state, fmt.Sprintf("HEALTHCHECK %q", runConfig.Healthcheck))
 }
 
 // ENTRYPOINT /usr/sbin/nginx
@@ -661,33 +430,16 @@
 // Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint
 // is initialized at newBuilder time instead of through argument parsing.
 //
-func entrypoint(req dispatchRequest) error {
-	if err := req.flags.Parse(); err != nil {
-		return err
-	}
-
-	runConfig := req.state.runConfig
-	parsed := handleJSONArgs(req.args, req.attributes)
-
-	switch {
-	case req.attributes["json"]:
-		// ENTRYPOINT ["echo", "hi"]
-		runConfig.Entrypoint = strslice.StrSlice(parsed)
-	case len(parsed) == 0:
-		// ENTRYPOINT []
-		runConfig.Entrypoint = nil
-	default:
-		// ENTRYPOINT echo hi
-		runConfig.Entrypoint = strslice.StrSlice(append(getShell(runConfig, req.builder.platform), parsed[0]))
-	}
-
-	// when setting the entrypoint if a CMD was not explicitly set then
-	// set the command to nil
-	if !req.state.cmdSet {
+func dispatchEntrypoint(d dispatchRequest, c *instructions.EntrypointCommand) error {
+	runConfig := d.state.runConfig
+	optionsOS := system.ParsePlatform(d.builder.options.Platform).OS
+	cmd := resolveCmdLine(c.ShellDependantCmdLine, runConfig, optionsOS)
+	runConfig.Entrypoint = cmd
+	if !d.state.cmdSet {
 		runConfig.Cmd = nil
 	}
 
-	return req.builder.commit(req.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint))
+	return d.builder.commit(d.state, fmt.Sprintf("ENTRYPOINT %q", runConfig.Entrypoint))
 }
 
 // EXPOSE 6667/tcp 7000/tcp
@@ -695,41 +447,33 @@
 // Expose ports for links and port mappings. This all ends up in
 // req.runConfig.ExposedPorts for runconfig.
 //
-func expose(req dispatchRequest) error {
-	portsTab := req.args
-
-	if len(req.args) == 0 {
-		return errAtLeastOneArgument("EXPOSE")
+func dispatchExpose(d dispatchRequest, c *instructions.ExposeCommand, envs []string) error {
+	// Custom multi-word expansion:
+	// EXPOSE $FOO with FOO="80 443" is expanded as EXPOSE [80,443]. EXPOSE is the only
+	// command that supports word-to-words expansion, so its word processing is de-generalized here.
+	ports := []string{}
+	for _, p := range c.Ports {
+		ps, err := d.shlex.ProcessWords(p, envs)
+		if err != nil {
+			return err
+		}
+		ports = append(ports, ps...)
 	}
+	c.Ports = ports
 
-	if err := req.flags.Parse(); err != nil {
-		return err
-	}
-
-	runConfig := req.state.runConfig
-	if runConfig.ExposedPorts == nil {
-		runConfig.ExposedPorts = make(nat.PortSet)
-	}
-
-	ports, _, err := nat.ParsePortSpecs(portsTab)
+	ps, _, err := nat.ParsePortSpecs(ports)
 	if err != nil {
 		return err
 	}
 
-	// instead of using ports directly, we build a list of ports and sort it so
-	// the order is consistent. This prevents cache burst where map ordering
-	// changes between builds
-	portList := make([]string, len(ports))
-	var i int
-	for port := range ports {
-		if _, exists := runConfig.ExposedPorts[port]; !exists {
-			runConfig.ExposedPorts[port] = struct{}{}
-		}
-		portList[i] = string(port)
-		i++
+	if d.state.runConfig.ExposedPorts == nil {
+		d.state.runConfig.ExposedPorts = make(nat.PortSet)
 	}
-	sort.Strings(portList)
-	return req.builder.commit(req.state, "EXPOSE "+strings.Join(portList, " "))
+	for p := range ps {
+		d.state.runConfig.ExposedPorts[p] = struct{}{}
+	}
+
+	return d.builder.commit(d.state, "EXPOSE "+strings.Join(c.Ports, " "))
 }
 
 // USER foo
@@ -737,62 +481,39 @@
 // Set the user to 'foo' for future commands and when running the
 // ENTRYPOINT/CMD at container run time.
 //
-func user(req dispatchRequest) error {
-	if len(req.args) != 1 {
-		return errExactlyOneArgument("USER")
-	}
-
-	if err := req.flags.Parse(); err != nil {
-		return err
-	}
-
-	req.state.runConfig.User = req.args[0]
-	return req.builder.commit(req.state, fmt.Sprintf("USER %v", req.args))
+func dispatchUser(d dispatchRequest, c *instructions.UserCommand) error {
+	d.state.runConfig.User = c.User
+	return d.builder.commit(d.state, fmt.Sprintf("USER %v", c.User))
 }
 
 // VOLUME /foo
 //
 // Expose the volume /foo for use. Will also accept the JSON array form.
 //
-func volume(req dispatchRequest) error {
-	if len(req.args) == 0 {
-		return errAtLeastOneArgument("VOLUME")
+func dispatchVolume(d dispatchRequest, c *instructions.VolumeCommand) error {
+	if d.state.runConfig.Volumes == nil {
+		d.state.runConfig.Volumes = map[string]struct{}{}
 	}
-
-	if err := req.flags.Parse(); err != nil {
-		return err
-	}
-
-	runConfig := req.state.runConfig
-	if runConfig.Volumes == nil {
-		runConfig.Volumes = map[string]struct{}{}
-	}
-	for _, v := range req.args {
-		v = strings.TrimSpace(v)
+	for _, v := range c.Volumes {
 		if v == "" {
 			return errors.New("VOLUME specified can not be an empty string")
 		}
-		runConfig.Volumes[v] = struct{}{}
+		d.state.runConfig.Volumes[v] = struct{}{}
 	}
-	return req.builder.commit(req.state, fmt.Sprintf("VOLUME %v", req.args))
+	return d.builder.commit(d.state, fmt.Sprintf("VOLUME %v", c.Volumes))
 }
 
 // STOPSIGNAL signal
 //
 // Set the signal that will be used to kill the container.
-func stopSignal(req dispatchRequest) error {
-	if len(req.args) != 1 {
-		return errExactlyOneArgument("STOPSIGNAL")
-	}
+func dispatchStopSignal(d dispatchRequest, c *instructions.StopSignalCommand) error {
 
-	sig := req.args[0]
-	_, err := signal.ParseSignal(sig)
+	_, err := signal.ParseSignal(c.Signal)
 	if err != nil {
 		return validationError{err}
 	}
-
-	req.state.runConfig.StopSignal = sig
-	return req.builder.commit(req.state, fmt.Sprintf("STOPSIGNAL %v", req.args))
+	d.state.runConfig.StopSignal = c.Signal
+	return d.builder.commit(d.state, fmt.Sprintf("STOPSIGNAL %v", c.Signal))
 }
 
 // ARG name[=value]
@@ -800,89 +521,21 @@
 // Adds the variable foo to the trusted list of variables that can be passed
 // to builder using the --build-arg flag for expansion/substitution or passing to 'run'.
 // Dockerfile author may optionally set a default value of this variable.
-func arg(req dispatchRequest) error {
-	if len(req.args) != 1 {
-		return errExactlyOneArgument("ARG")
+func dispatchArg(d dispatchRequest, c *instructions.ArgCommand) error {
+
+	commitStr := "ARG " + c.Key
+	if c.Value != nil {
+		commitStr += "=" + *c.Value
 	}
 
-	var (
-		name       string
-		newValue   string
-		hasDefault bool
-	)
-
-	arg := req.args[0]
-	// 'arg' can just be a name or name-value pair. Note that this is different
-	// from 'env' that handles the split of name and value at the parser level.
-	// The reason for doing it differently for 'arg' is that we support just
-	// defining an arg and not assign it a value (while 'env' always expects a
-	// name-value pair). If possible, it will be good to harmonize the two.
-	if strings.Contains(arg, "=") {
-		parts := strings.SplitN(arg, "=", 2)
-		if len(parts[0]) == 0 {
-			return errBlankCommandNames("ARG")
-		}
-
-		name = parts[0]
-		newValue = parts[1]
-		hasDefault = true
-	} else {
-		name = arg
-		hasDefault = false
-	}
-
-	var value *string
-	if hasDefault {
-		value = &newValue
-	}
-	req.builder.buildArgs.AddArg(name, value)
-
-	// Arg before FROM doesn't add a layer
-	if !req.state.hasFromImage() {
-		req.builder.buildArgs.AddMetaArg(name, value)
-		return nil
-	}
-	return req.builder.commit(req.state, "ARG "+arg)
+	d.state.buildArgs.AddArg(c.Key, c.Value)
+	return d.builder.commit(d.state, commitStr)
 }
 
 // SHELL powershell -command
 //
 // Set the non-default shell to use.
-func shell(req dispatchRequest) error {
-	if err := req.flags.Parse(); err != nil {
-		return err
-	}
-	shellSlice := handleJSONArgs(req.args, req.attributes)
-	switch {
-	case len(shellSlice) == 0:
-		// SHELL []
-		return errAtLeastOneArgument("SHELL")
-	case req.attributes["json"]:
-		// SHELL ["powershell", "-command"]
-		req.state.runConfig.Shell = strslice.StrSlice(shellSlice)
-	default:
-		// SHELL powershell -command - not JSON
-		return errNotJSON("SHELL", req.original)
-	}
-	return req.builder.commit(req.state, fmt.Sprintf("SHELL %v", shellSlice))
-}
-
-func errAtLeastOneArgument(command string) error {
-	return fmt.Errorf("%s requires at least one argument", command)
-}
-
-func errExactlyOneArgument(command string) error {
-	return fmt.Errorf("%s requires exactly one argument", command)
-}
-
-func errAtLeastTwoArguments(command string) error {
-	return fmt.Errorf("%s requires at least two arguments", command)
-}
-
-func errBlankCommandNames(command string) error {
-	return fmt.Errorf("%s names can not be blank", command)
-}
-
-func errTooManyArguments(command string) error {
-	return fmt.Errorf("Bad input to %s, too many arguments", command)
+func dispatchShell(d dispatchRequest, c *instructions.ShellCommand) error {
+	d.state.runConfig.Shell = c.Shell
+	return d.builder.commit(d.state, fmt.Sprintf("SHELL %v", d.state.runConfig.Shell))
 }
diff --git a/builder/dockerfile/dispatchers_test.go b/builder/dockerfile/dispatchers_test.go
index fb1df83..dc9148b 100644
--- a/builder/dockerfile/dispatchers_test.go
+++ b/builder/dockerfile/dispatchers_test.go
@@ -1,198 +1,114 @@
 package dockerfile
 
 import (
-	"fmt"
-	"runtime"
-	"testing"
-
 	"bytes"
 	"context"
+	"runtime"
+	"testing"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/strslice"
 	"github.com/docker/docker/builder"
-	"github.com/docker/docker/builder/dockerfile/parser"
-	"github.com/docker/docker/internal/testutil"
+	"github.com/docker/docker/builder/dockerfile/instructions"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/go-connections/nat"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
-type commandWithFunction struct {
-	name     string
-	function func(args []string) error
-}
-
-func withArgs(f dispatcher) func([]string) error {
-	return func(args []string) error {
-		return f(dispatchRequest{args: args})
-	}
-}
-
-func withBuilderAndArgs(builder *Builder, f dispatcher) func([]string) error {
-	return func(args []string) error {
-		return f(defaultDispatchReq(builder, args...))
-	}
-}
-
-func defaultDispatchReq(builder *Builder, args ...string) dispatchRequest {
-	return dispatchRequest{
-		builder: builder,
-		args:    args,
-		flags:   NewBFlags(),
-		shlex:   NewShellLex(parser.DefaultEscapeToken),
-		state:   &dispatchState{runConfig: &container.Config{}},
-	}
-}
-
 func newBuilderWithMockBackend() *Builder {
 	mockBackend := &MockBackend{}
 	ctx := context.Background()
 	b := &Builder{
-		options:       &types.ImageBuildOptions{},
+		options:       &types.ImageBuildOptions{Platform: runtime.GOOS},
 		docker:        mockBackend,
-		buildArgs:     newBuildArgs(make(map[string]*string)),
 		Stdout:        new(bytes.Buffer),
 		clientCtx:     ctx,
 		disableCommit: true,
 		imageSources: newImageSources(ctx, builderOptions{
-			Options: &types.ImageBuildOptions{},
+			Options: &types.ImageBuildOptions{Platform: runtime.GOOS},
 			Backend: mockBackend,
 		}),
-		buildStages:      newBuildStages(),
 		imageProber:      newImageProber(mockBackend, nil, runtime.GOOS, false),
 		containerManager: newContainerManager(mockBackend),
 	}
 	return b
 }
 
-func TestCommandsExactlyOneArgument(t *testing.T) {
-	commands := []commandWithFunction{
-		{"MAINTAINER", withArgs(maintainer)},
-		{"WORKDIR", withArgs(workdir)},
-		{"USER", withArgs(user)},
-		{"STOPSIGNAL", withArgs(stopSignal)},
-	}
-
-	for _, command := range commands {
-		err := command.function([]string{})
-		assert.EqualError(t, err, errExactlyOneArgument(command.name).Error())
-	}
-}
-
-func TestCommandsAtLeastOneArgument(t *testing.T) {
-	commands := []commandWithFunction{
-		{"ENV", withArgs(env)},
-		{"LABEL", withArgs(label)},
-		{"ONBUILD", withArgs(onbuild)},
-		{"HEALTHCHECK", withArgs(healthcheck)},
-		{"EXPOSE", withArgs(expose)},
-		{"VOLUME", withArgs(volume)},
-	}
-
-	for _, command := range commands {
-		err := command.function([]string{})
-		assert.EqualError(t, err, errAtLeastOneArgument(command.name).Error())
-	}
-}
-
-func TestCommandsAtLeastTwoArguments(t *testing.T) {
-	commands := []commandWithFunction{
-		{"ADD", withArgs(add)},
-		{"COPY", withArgs(dispatchCopy)}}
-
-	for _, command := range commands {
-		err := command.function([]string{"arg1"})
-		assert.EqualError(t, err, errAtLeastTwoArguments(command.name).Error())
-	}
-}
-
-func TestCommandsTooManyArguments(t *testing.T) {
-	commands := []commandWithFunction{
-		{"ENV", withArgs(env)},
-		{"LABEL", withArgs(label)}}
-
-	for _, command := range commands {
-		err := command.function([]string{"arg1", "arg2", "arg3"})
-		assert.EqualError(t, err, errTooManyArguments(command.name).Error())
-	}
-}
-
-func TestCommandsBlankNames(t *testing.T) {
-	builder := newBuilderWithMockBackend()
-	commands := []commandWithFunction{
-		{"ENV", withBuilderAndArgs(builder, env)},
-		{"LABEL", withBuilderAndArgs(builder, label)},
-	}
-
-	for _, command := range commands {
-		err := command.function([]string{"", ""})
-		assert.EqualError(t, err, errBlankCommandNames(command.name).Error())
-	}
-}
-
 func TestEnv2Variables(t *testing.T) {
 	b := newBuilderWithMockBackend()
-
-	args := []string{"var1", "val1", "var2", "val2"}
-	req := defaultDispatchReq(b, args...)
-	err := env(req)
+	sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
+	envCommand := &instructions.EnvCommand{
+		Env: instructions.KeyValuePairs{
+			instructions.KeyValuePair{Key: "var1", Value: "val1"},
+			instructions.KeyValuePair{Key: "var2", Value: "val2"},
+		},
+	}
+	err := dispatch(sb, envCommand)
 	require.NoError(t, err)
 
 	expected := []string{
-		fmt.Sprintf("%s=%s", args[0], args[1]),
-		fmt.Sprintf("%s=%s", args[2], args[3]),
+		"var1=val1",
+		"var2=val2",
 	}
-	assert.Equal(t, expected, req.state.runConfig.Env)
+	assert.Equal(t, expected, sb.state.runConfig.Env)
 }
 
 func TestEnvValueWithExistingRunConfigEnv(t *testing.T) {
 	b := newBuilderWithMockBackend()
-
-	args := []string{"var1", "val1"}
-	req := defaultDispatchReq(b, args...)
-	req.state.runConfig.Env = []string{"var1=old", "var2=fromenv"}
-	err := env(req)
+	sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
+	sb.state.runConfig.Env = []string{"var1=old", "var2=fromenv"}
+	envCommand := &instructions.EnvCommand{
+		Env: instructions.KeyValuePairs{
+			instructions.KeyValuePair{Key: "var1", Value: "val1"},
+		},
+	}
+	err := dispatch(sb, envCommand)
 	require.NoError(t, err)
-
 	expected := []string{
-		fmt.Sprintf("%s=%s", args[0], args[1]),
+		"var1=val1",
 		"var2=fromenv",
 	}
-	assert.Equal(t, expected, req.state.runConfig.Env)
+	assert.Equal(t, expected, sb.state.runConfig.Env)
 }
 
 func TestMaintainer(t *testing.T) {
 	maintainerEntry := "Some Maintainer <maintainer@example.com>"
-
 	b := newBuilderWithMockBackend()
-	req := defaultDispatchReq(b, maintainerEntry)
-	err := maintainer(req)
+	sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
+	cmd := &instructions.MaintainerCommand{Maintainer: maintainerEntry}
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
-	assert.Equal(t, maintainerEntry, req.state.maintainer)
+	assert.Equal(t, maintainerEntry, sb.state.maintainer)
 }
 
 func TestLabel(t *testing.T) {
 	labelName := "label"
 	labelValue := "value"
 
-	labelEntry := []string{labelName, labelValue}
 	b := newBuilderWithMockBackend()
-	req := defaultDispatchReq(b, labelEntry...)
-	err := label(req)
+	sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
+	cmd := &instructions.LabelCommand{
+		Labels: instructions.KeyValuePairs{
+			instructions.KeyValuePair{Key: labelName, Value: labelValue},
+		},
+	}
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
 
-	require.Contains(t, req.state.runConfig.Labels, labelName)
-	assert.Equal(t, req.state.runConfig.Labels[labelName], labelValue)
+	require.Contains(t, sb.state.runConfig.Labels, labelName)
+	assert.Equal(t, sb.state.runConfig.Labels[labelName], labelValue)
 }
 
 func TestFromScratch(t *testing.T) {
 	b := newBuilderWithMockBackend()
-	req := defaultDispatchReq(b, "scratch")
-	err := from(req)
+	sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
+	cmd := &instructions.Stage{
+		BaseName: "scratch",
+	}
+	err := initializeStage(sb, cmd)
 
 	if runtime.GOOS == "windows" && !system.LCOWSupported() {
 		assert.EqualError(t, err, "Windows does not support FROM scratch")
@@ -200,14 +116,10 @@
 	}
 
 	require.NoError(t, err)
-	assert.True(t, req.state.hasFromImage())
-	assert.Equal(t, "", req.state.imageID)
-	// Windows does not set the default path. TODO @jhowardmsft LCOW support. This will need revisiting as we get further into the implementation
+	assert.True(t, sb.state.hasFromImage())
+	assert.Equal(t, "", sb.state.imageID)
 	expected := "PATH=" + system.DefaultPathEnv(runtime.GOOS)
-	if runtime.GOOS == "windows" {
-		expected = ""
-	}
-	assert.Equal(t, []string{expected}, req.state.runConfig.Env)
+	assert.Equal(t, []string{expected}, sb.state.runConfig.Env)
 }
 
 func TestFromWithArg(t *testing.T) {
@@ -219,16 +131,27 @@
 	}
 	b := newBuilderWithMockBackend()
 	b.docker.(*MockBackend).getImageFunc = getImage
+	args := newBuildArgs(make(map[string]*string))
 
-	require.NoError(t, arg(defaultDispatchReq(b, "THETAG="+tag)))
-	req := defaultDispatchReq(b, "alpine${THETAG}")
-	err := from(req)
+	val := "sometag"
+	metaArg := instructions.ArgCommand{
+		Key:   "THETAG",
+		Value: &val,
+	}
+	cmd := &instructions.Stage{
+		BaseName: "alpine:${THETAG}",
+	}
+	err := processMetaArg(metaArg, NewShellLex('\\'), args)
 
+	sb := newDispatchRequest(b, '\\', nil, args, newStagesBuildResults())
 	require.NoError(t, err)
-	assert.Equal(t, expected, req.state.imageID)
-	assert.Equal(t, expected, req.state.baseImage.ImageID())
-	assert.Len(t, b.buildArgs.GetAllAllowed(), 0)
-	assert.Len(t, b.buildArgs.GetAllMeta(), 1)
+	err = initializeStage(sb, cmd)
+	require.NoError(t, err)
+
+	assert.Equal(t, expected, sb.state.imageID)
+	assert.Equal(t, expected, sb.state.baseImage.ImageID())
+	assert.Len(t, sb.state.buildArgs.GetAllAllowed(), 0)
+	assert.Len(t, sb.state.buildArgs.GetAllMeta(), 1)
 }
 
 func TestFromWithUndefinedArg(t *testing.T) {
@@ -240,74 +163,74 @@
 	}
 	b := newBuilderWithMockBackend()
 	b.docker.(*MockBackend).getImageFunc = getImage
+	sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
+
 	b.options.BuildArgs = map[string]*string{"THETAG": &tag}
 
-	req := defaultDispatchReq(b, "alpine${THETAG}")
-	err := from(req)
+	cmd := &instructions.Stage{
+		BaseName: "alpine${THETAG}",
+	}
+	err := initializeStage(sb, cmd)
 	require.NoError(t, err)
-	assert.Equal(t, expected, req.state.imageID)
+	assert.Equal(t, expected, sb.state.imageID)
 }
 
-func TestFromMultiStageWithScratchNamedStage(t *testing.T) {
-	if runtime.GOOS == "windows" {
-		t.Skip("Windows does not support scratch")
-	}
+func TestFromMultiStageWithNamedStage(t *testing.T) {
 	b := newBuilderWithMockBackend()
-	req := defaultDispatchReq(b, "scratch", "AS", "base")
-
-	require.NoError(t, from(req))
-	assert.True(t, req.state.hasFromImage())
-
-	req.args = []string{"base"}
-	require.NoError(t, from(req))
-	assert.True(t, req.state.hasFromImage())
-}
-
-func TestOnbuildIllegalTriggers(t *testing.T) {
-	triggers := []struct{ command, expectedError string }{
-		{"ONBUILD", "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed"},
-		{"MAINTAINER", "MAINTAINER isn't allowed as an ONBUILD trigger"},
-		{"FROM", "FROM isn't allowed as an ONBUILD trigger"}}
-
-	for _, trigger := range triggers {
-		b := newBuilderWithMockBackend()
-
-		err := onbuild(defaultDispatchReq(b, trigger.command))
-		testutil.ErrorContains(t, err, trigger.expectedError)
-	}
+	firstFrom := &instructions.Stage{BaseName: "someimg", Name: "base"}
+	secondFrom := &instructions.Stage{BaseName: "base"}
+	previousResults := newStagesBuildResults()
+	firstSB := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), previousResults)
+	secondSB := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), previousResults)
+	err := initializeStage(firstSB, firstFrom)
+	require.NoError(t, err)
+	assert.True(t, firstSB.state.hasFromImage())
+	previousResults.indexed["base"] = firstSB.state.runConfig
+	previousResults.flat = append(previousResults.flat, firstSB.state.runConfig)
+	err = initializeStage(secondSB, secondFrom)
+	require.NoError(t, err)
+	assert.True(t, secondSB.state.hasFromImage())
 }
 
 func TestOnbuild(t *testing.T) {
 	b := newBuilderWithMockBackend()
-
-	req := defaultDispatchReq(b, "ADD", ".", "/app/src")
-	req.original = "ONBUILD ADD . /app/src"
-	req.state.runConfig = &container.Config{}
-
-	err := onbuild(req)
+	sb := newDispatchRequest(b, '\\', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
+	cmd := &instructions.OnbuildCommand{
+		Expression: "ADD . /app/src",
+	}
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
-	assert.Equal(t, "ADD . /app/src", req.state.runConfig.OnBuild[0])
+	assert.Equal(t, "ADD . /app/src", sb.state.runConfig.OnBuild[0])
 }
 
 func TestWorkdir(t *testing.T) {
 	b := newBuilderWithMockBackend()
+	sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
 	workingDir := "/app"
 	if runtime.GOOS == "windows" {
-		workingDir = "C:\app"
+		workingDir = "C:\\app"
+	}
+	cmd := &instructions.WorkdirCommand{
+		Path: workingDir,
 	}
 
-	req := defaultDispatchReq(b, workingDir)
-	err := workdir(req)
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
-	assert.Equal(t, workingDir, req.state.runConfig.WorkingDir)
+	assert.Equal(t, workingDir, sb.state.runConfig.WorkingDir)
 }
 
 func TestCmd(t *testing.T) {
 	b := newBuilderWithMockBackend()
+	sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
 	command := "./executable"
 
-	req := defaultDispatchReq(b, command)
-	err := cmd(req)
+	cmd := &instructions.CmdCommand{
+		ShellDependantCmdLine: instructions.ShellDependantCmdLine{
+			CmdLine:      strslice.StrSlice{command},
+			PrependShell: true,
+		},
+	}
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
 
 	var expectedCommand strslice.StrSlice
@@ -317,42 +240,56 @@
 		expectedCommand = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", command))
 	}
 
-	assert.Equal(t, expectedCommand, req.state.runConfig.Cmd)
-	assert.True(t, req.state.cmdSet)
+	assert.Equal(t, expectedCommand, sb.state.runConfig.Cmd)
+	assert.True(t, sb.state.cmdSet)
 }
 
 func TestHealthcheckNone(t *testing.T) {
 	b := newBuilderWithMockBackend()
-
-	req := defaultDispatchReq(b, "NONE")
-	err := healthcheck(req)
+	sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
+	cmd := &instructions.HealthCheckCommand{
+		Health: &container.HealthConfig{
+			Test: []string{"NONE"},
+		},
+	}
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
 
-	require.NotNil(t, req.state.runConfig.Healthcheck)
-	assert.Equal(t, []string{"NONE"}, req.state.runConfig.Healthcheck.Test)
+	require.NotNil(t, sb.state.runConfig.Healthcheck)
+	assert.Equal(t, []string{"NONE"}, sb.state.runConfig.Healthcheck.Test)
 }
 
 func TestHealthcheckCmd(t *testing.T) {
-	b := newBuilderWithMockBackend()
 
-	args := []string{"CMD", "curl", "-f", "http://localhost/", "||", "exit", "1"}
-	req := defaultDispatchReq(b, args...)
-	err := healthcheck(req)
+	b := newBuilderWithMockBackend()
+	sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
+	expectedTest := []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"}
+	cmd := &instructions.HealthCheckCommand{
+		Health: &container.HealthConfig{
+			Test: expectedTest,
+		},
+	}
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
 
-	require.NotNil(t, req.state.runConfig.Healthcheck)
-	expectedTest := []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"}
-	assert.Equal(t, expectedTest, req.state.runConfig.Healthcheck.Test)
+	require.NotNil(t, sb.state.runConfig.Healthcheck)
+	assert.Equal(t, expectedTest, sb.state.runConfig.Healthcheck.Test)
 }
 
 func TestEntrypoint(t *testing.T) {
 	b := newBuilderWithMockBackend()
+	sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
 	entrypointCmd := "/usr/sbin/nginx"
 
-	req := defaultDispatchReq(b, entrypointCmd)
-	err := entrypoint(req)
+	cmd := &instructions.EntrypointCommand{
+		ShellDependantCmdLine: instructions.ShellDependantCmdLine{
+			CmdLine:      strslice.StrSlice{entrypointCmd},
+			PrependShell: true,
+		},
+	}
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
-	require.NotNil(t, req.state.runConfig.Entrypoint)
+	require.NotNil(t, sb.state.runConfig.Entrypoint)
 
 	var expectedEntrypoint strslice.StrSlice
 	if runtime.GOOS == "windows" {
@@ -360,99 +297,99 @@
 	} else {
 		expectedEntrypoint = strslice.StrSlice(append([]string{"/bin/sh"}, "-c", entrypointCmd))
 	}
-	assert.Equal(t, expectedEntrypoint, req.state.runConfig.Entrypoint)
+	assert.Equal(t, expectedEntrypoint, sb.state.runConfig.Entrypoint)
 }
 
 func TestExpose(t *testing.T) {
 	b := newBuilderWithMockBackend()
+	sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
 
 	exposedPort := "80"
-	req := defaultDispatchReq(b, exposedPort)
-	err := expose(req)
+	cmd := &instructions.ExposeCommand{
+		Ports: []string{exposedPort},
+	}
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
 
-	require.NotNil(t, req.state.runConfig.ExposedPorts)
-	require.Len(t, req.state.runConfig.ExposedPorts, 1)
+	require.NotNil(t, sb.state.runConfig.ExposedPorts)
+	require.Len(t, sb.state.runConfig.ExposedPorts, 1)
 
 	portsMapping, err := nat.ParsePortSpec(exposedPort)
 	require.NoError(t, err)
-	assert.Contains(t, req.state.runConfig.ExposedPorts, portsMapping[0].Port)
+	assert.Contains(t, sb.state.runConfig.ExposedPorts, portsMapping[0].Port)
 }
 
 func TestUser(t *testing.T) {
 	b := newBuilderWithMockBackend()
-	userCommand := "foo"
+	sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
 
-	req := defaultDispatchReq(b, userCommand)
-	err := user(req)
+	cmd := &instructions.UserCommand{
+		User: "test",
+	}
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
-	assert.Equal(t, userCommand, req.state.runConfig.User)
+	assert.Equal(t, "test", sb.state.runConfig.User)
 }
 
 func TestVolume(t *testing.T) {
 	b := newBuilderWithMockBackend()
+	sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
 
 	exposedVolume := "/foo"
 
-	req := defaultDispatchReq(b, exposedVolume)
-	err := volume(req)
+	cmd := &instructions.VolumeCommand{
+		Volumes: []string{exposedVolume},
+	}
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
-
-	require.NotNil(t, req.state.runConfig.Volumes)
-	assert.Len(t, req.state.runConfig.Volumes, 1)
-	assert.Contains(t, req.state.runConfig.Volumes, exposedVolume)
+	require.NotNil(t, sb.state.runConfig.Volumes)
+	assert.Len(t, sb.state.runConfig.Volumes, 1)
+	assert.Contains(t, sb.state.runConfig.Volumes, exposedVolume)
 }
 
 func TestStopSignal(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("Windows does not support stopsignal")
+		return
+	}
 	b := newBuilderWithMockBackend()
+	sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
 	signal := "SIGKILL"
 
-	req := defaultDispatchReq(b, signal)
-	err := stopSignal(req)
+	cmd := &instructions.StopSignalCommand{
+		Signal: signal,
+	}
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
-	assert.Equal(t, signal, req.state.runConfig.StopSignal)
+	assert.Equal(t, signal, sb.state.runConfig.StopSignal)
 }
 
 func TestArg(t *testing.T) {
 	b := newBuilderWithMockBackend()
+	sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
 
 	argName := "foo"
 	argVal := "bar"
-	argDef := fmt.Sprintf("%s=%s", argName, argVal)
-
-	err := arg(defaultDispatchReq(b, argDef))
+	cmd := &instructions.ArgCommand{Key: argName, Value: &argVal}
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
 
 	expected := map[string]string{argName: argVal}
-	assert.Equal(t, expected, b.buildArgs.GetAllAllowed())
+	assert.Equal(t, expected, sb.state.buildArgs.GetAllAllowed())
 }
 
 func TestShell(t *testing.T) {
 	b := newBuilderWithMockBackend()
+	sb := newDispatchRequest(b, '`', nil, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
 
 	shellCmd := "powershell"
-	req := defaultDispatchReq(b, shellCmd)
-	req.attributes = map[string]bool{"json": true}
+	cmd := &instructions.ShellCommand{Shell: strslice.StrSlice{shellCmd}}
 
-	err := shell(req)
+	err := dispatch(sb, cmd)
 	require.NoError(t, err)
 
 	expectedShell := strslice.StrSlice([]string{shellCmd})
-	assert.Equal(t, expectedShell, req.state.runConfig.Shell)
-}
-
-func TestParseOptInterval(t *testing.T) {
-	flInterval := &Flag{
-		name:     "interval",
-		flagType: stringType,
-		Value:    "50ns",
-	}
-	_, err := parseOptInterval(flInterval)
-	testutil.ErrorContains(t, err, "cannot be less than 1ms")
-
-	flInterval.Value = "1ms"
-	_, err = parseOptInterval(flInterval)
-	require.NoError(t, err)
+	assert.Equal(t, expectedShell, sb.state.runConfig.Shell)
 }
 
 func TestPrependEnvOnCmd(t *testing.T) {
@@ -469,8 +406,10 @@
 
 func TestRunWithBuildArgs(t *testing.T) {
 	b := newBuilderWithMockBackend()
-	b.buildArgs.argsFromOptions["HTTP_PROXY"] = strPtr("FOO")
+	args := newBuildArgs(make(map[string]*string))
+	args.argsFromOptions["HTTP_PROXY"] = strPtr("FOO")
 	b.disableCommit = false
+	sb := newDispatchRequest(b, '`', nil, args, newStagesBuildResults())
 
 	runConfig := &container.Config{}
 	origCmd := strslice.StrSlice([]string{"cmd", "in", "from", "image"})
@@ -512,14 +451,18 @@
 		assert.Equal(t, strslice.StrSlice(nil), cfg.Config.Entrypoint)
 		return "", nil
 	}
-
-	req := defaultDispatchReq(b, "abcdef")
-	require.NoError(t, from(req))
-	b.buildArgs.AddArg("one", strPtr("two"))
-
-	req.args = []string{"echo foo"}
-	require.NoError(t, run(req))
+	from := &instructions.Stage{BaseName: "abcdef"}
+	err := initializeStage(sb, from)
+	require.NoError(t, err)
+	sb.state.buildArgs.AddArg("one", strPtr("two"))
+	run := &instructions.RunCommand{
+		ShellDependantCmdLine: instructions.ShellDependantCmdLine{
+			CmdLine:      strslice.StrSlice{"echo foo"},
+			PrependShell: true,
+		},
+	}
+	require.NoError(t, dispatch(sb, run))
 
 	// Check that runConfig.Cmd has not been modified by run
-	assert.Equal(t, origCmd, req.state.runConfig.Cmd)
+	assert.Equal(t, origCmd, sb.state.runConfig.Cmd)
 }
diff --git a/builder/dockerfile/dispatchers_unix.go b/builder/dockerfile/dispatchers_unix.go
index c815ec5..6f0d581 100644
--- a/builder/dockerfile/dispatchers_unix.go
+++ b/builder/dockerfile/dispatchers_unix.go
@@ -4,7 +4,6 @@
 
 import (
 	"errors"
-	"fmt"
 	"os"
 	"path/filepath"
 )
@@ -23,10 +22,6 @@
 	return requested, nil
 }
 
-func errNotJSON(command, _ string) error {
-	return fmt.Errorf("%s requires the arguments to be in JSON form", command)
-}
-
 // equalEnvKeys compare two strings and returns true if they are equal. On
 // Windows this comparison is case insensitive.
 func equalEnvKeys(from, to string) bool {
diff --git a/builder/dockerfile/dispatchers_windows.go b/builder/dockerfile/dispatchers_windows.go
index 1c77b29..8f6eaac 100644
--- a/builder/dockerfile/dispatchers_windows.go
+++ b/builder/dockerfile/dispatchers_windows.go
@@ -94,25 +94,6 @@
 	return (strings.ToUpper(string(requested[0])) + requested[1:]), nil
 }
 
-func errNotJSON(command, original string) error {
-	// For Windows users, give a hint if it looks like it might contain
-	// a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"],
-	// as JSON must be escaped. Unfortunate...
-	//
-	// Specifically looking for quote-driveletter-colon-backslash, there's no
-	// double backslash and a [] pair. No, this is not perfect, but it doesn't
-	// have to be. It's simply a hint to make life a little easier.
-	extra := ""
-	original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1)))
-	if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 &&
-		!strings.Contains(original, `\\`) &&
-		strings.Contains(original, "[") &&
-		strings.Contains(original, "]") {
-		extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original)
-	}
-	return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra)
-}
-
 // equalEnvKeys compare two strings and returns true if they are equal. On
 // Windows this comparison is case insensitive.
 func equalEnvKeys(from, to string) bool {
diff --git a/builder/dockerfile/evaluator.go b/builder/dockerfile/evaluator.go
index a3ca201..da97a7f 100644
--- a/builder/dockerfile/evaluator.go
+++ b/builder/dockerfile/evaluator.go
@@ -20,169 +20,85 @@
 package dockerfile
 
 import (
-	"bytes"
-	"fmt"
-	"runtime"
+	"reflect"
+	"strconv"
 	"strings"
 
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/builder"
-	"github.com/docker/docker/builder/dockerfile/command"
-	"github.com/docker/docker/builder/dockerfile/parser"
+	"github.com/docker/docker/builder/dockerfile/instructions"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/runconfig/opts"
 	"github.com/pkg/errors"
 )
 
-// Environment variable interpolation will happen on these statements only.
-var replaceEnvAllowed = map[string]bool{
-	command.Env:        true,
-	command.Label:      true,
-	command.Add:        true,
-	command.Copy:       true,
-	command.Workdir:    true,
-	command.Expose:     true,
-	command.Volume:     true,
-	command.User:       true,
-	command.StopSignal: true,
-	command.Arg:        true,
-}
-
-// Certain commands are allowed to have their args split into more
-// words after env var replacements. Meaning:
-//   ENV foo="123 456"
-//   EXPOSE $foo
-// should result in the same thing as:
-//   EXPOSE 123 456
-// and not treat "123 456" as a single word.
-// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing.
-// Quotes will cause it to still be treated as single word.
-var allowWordExpansion = map[string]bool{
-	command.Expose: true,
-}
-
-type dispatchRequest struct {
-	builder    *Builder // TODO: replace this with a smaller interface
-	args       []string
-	attributes map[string]bool
-	flags      *BFlags
-	original   string
-	shlex      *ShellLex
-	state      *dispatchState
-	source     builder.Source
-}
-
-func newDispatchRequestFromOptions(options dispatchOptions, builder *Builder, args []string) dispatchRequest {
-	return dispatchRequest{
-		builder:    builder,
-		args:       args,
-		attributes: options.node.Attributes,
-		original:   options.node.Original,
-		flags:      NewBFlagsWithArgs(options.node.Flags),
-		shlex:      options.shlex,
-		state:      options.state,
-		source:     options.source,
-	}
-}
-
-type dispatcher func(dispatchRequest) error
-
-var evaluateTable map[string]dispatcher
-
-func init() {
-	evaluateTable = map[string]dispatcher{
-		command.Add:         add,
-		command.Arg:         arg,
-		command.Cmd:         cmd,
-		command.Copy:        dispatchCopy, // copy() is a go builtin
-		command.Entrypoint:  entrypoint,
-		command.Env:         env,
-		command.Expose:      expose,
-		command.From:        from,
-		command.Healthcheck: healthcheck,
-		command.Label:       label,
-		command.Maintainer:  maintainer,
-		command.Onbuild:     onbuild,
-		command.Run:         run,
-		command.Shell:       shell,
-		command.StopSignal:  stopSignal,
-		command.User:        user,
-		command.Volume:      volume,
-		command.Workdir:     workdir,
-	}
-}
-
-func formatStep(stepN int, stepTotal int) string {
-	return fmt.Sprintf("%d/%d", stepN+1, stepTotal)
-}
-
-// This method is the entrypoint to all statement handling routines.
-//
-// Almost all nodes will have this structure:
-// Child[Node, Node, Node] where Child is from parser.Node.Children and each
-// node comes from parser.Node.Next. This forms a "line" with a statement and
-// arguments and we process them in this normalized form by hitting
-// evaluateTable with the leaf nodes of the command and the Builder object.
-//
-// ONBUILD is a special case; in this case the parser will emit:
-// Child[Node, Child[Node, Node...]] where the first node is the literal
-// "onbuild" and the child entrypoint is the command of the ONBUILD statement,
-// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to
-// deal with that, at least until it becomes more of a general concern with new
-// features.
-func (b *Builder) dispatch(options dispatchOptions) (*dispatchState, error) {
-	node := options.node
-	cmd := node.Value
-	upperCasedCmd := strings.ToUpper(cmd)
-
-	// To ensure the user is given a decent error message if the platform
-	// on which the daemon is running does not support a builder command.
-	if err := platformSupports(strings.ToLower(cmd)); err != nil {
-		buildsFailed.WithValues(metricsCommandNotSupportedError).Inc()
-		return nil, validationError{err}
-	}
-
-	msg := bytes.NewBufferString(fmt.Sprintf("Step %s : %s%s",
-		options.stepMsg, upperCasedCmd, formatFlags(node.Flags)))
-
-	args := []string{}
-	ast := node
-	if cmd == command.Onbuild {
-		var err error
-		ast, args, err = handleOnBuildNode(node, msg)
+func dispatch(d dispatchRequest, cmd instructions.Command) (err error) {
+	if c, ok := cmd.(instructions.PlatformSpecific); ok {
+		optionsOS := system.ParsePlatform(d.builder.options.Platform).OS
+		err := c.CheckPlatform(optionsOS)
 		if err != nil {
-			return nil, validationError{err}
+			return validationError{err}
+		}
+	}
+	runConfigEnv := d.state.runConfig.Env
+	envs := append(runConfigEnv, d.state.buildArgs.FilterAllowed(runConfigEnv)...)
+
+	if ex, ok := cmd.(instructions.SupportsSingleWordExpansion); ok {
+		err := ex.Expand(func(word string) (string, error) {
+			return d.shlex.ProcessWord(word, envs)
+		})
+		if err != nil {
+			return validationError{err}
 		}
 	}
 
-	runConfigEnv := options.state.runConfig.Env
-	envs := append(runConfigEnv, b.buildArgs.FilterAllowed(runConfigEnv)...)
-	processFunc := createProcessWordFunc(options.shlex, cmd, envs)
-	words, err := getDispatchArgsFromNode(ast, processFunc, msg)
-	if err != nil {
-		buildsFailed.WithValues(metricsErrorProcessingCommandsError).Inc()
-		return nil, validationError{err}
+	defer func() {
+		if d.builder.options.ForceRemove {
+			d.builder.containerManager.RemoveAll(d.builder.Stdout)
+			return
+		}
+		if d.builder.options.Remove && err == nil {
+			d.builder.containerManager.RemoveAll(d.builder.Stdout)
+			return
+		}
+	}()
+	switch c := cmd.(type) {
+	case *instructions.EnvCommand:
+		return dispatchEnv(d, c)
+	case *instructions.MaintainerCommand:
+		return dispatchMaintainer(d, c)
+	case *instructions.LabelCommand:
+		return dispatchLabel(d, c)
+	case *instructions.AddCommand:
+		return dispatchAdd(d, c)
+	case *instructions.CopyCommand:
+		return dispatchCopy(d, c)
+	case *instructions.OnbuildCommand:
+		return dispatchOnbuild(d, c)
+	case *instructions.WorkdirCommand:
+		return dispatchWorkdir(d, c)
+	case *instructions.RunCommand:
+		return dispatchRun(d, c)
+	case *instructions.CmdCommand:
+		return dispatchCmd(d, c)
+	case *instructions.HealthCheckCommand:
+		return dispatchHealthcheck(d, c)
+	case *instructions.EntrypointCommand:
+		return dispatchEntrypoint(d, c)
+	case *instructions.ExposeCommand:
+		return dispatchExpose(d, c, envs)
+	case *instructions.UserCommand:
+		return dispatchUser(d, c)
+	case *instructions.VolumeCommand:
+		return dispatchVolume(d, c)
+	case *instructions.StopSignalCommand:
+		return dispatchStopSignal(d, c)
+	case *instructions.ArgCommand:
+		return dispatchArg(d, c)
+	case *instructions.ShellCommand:
+		return dispatchShell(d, c)
 	}
-	args = append(args, words...)
-
-	fmt.Fprintln(b.Stdout, msg.String())
-
-	f, ok := evaluateTable[cmd]
-	if !ok {
-		buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
-		return nil, validationError{errors.Errorf("unknown instruction: %s", upperCasedCmd)}
-	}
-	options.state.updateRunConfig()
-	err = f(newDispatchRequestFromOptions(options, b, args))
-	return options.state, err
-}
-
-type dispatchOptions struct {
-	state   *dispatchState
-	stepMsg string
-	node    *parser.Node
-	shlex   *ShellLex
-	source  builder.Source
+	return errors.Errorf("unsupported command type: %v", reflect.TypeOf(cmd))
 }
 
 // dispatchState is a data object which is modified by dispatchers
@@ -193,10 +109,95 @@
 	imageID    string
 	baseImage  builder.Image
 	stageName  string
+	buildArgs  *buildArgs
 }
 
-func newDispatchState() *dispatchState {
-	return &dispatchState{runConfig: &container.Config{}}
+func newDispatchState(baseArgs *buildArgs) *dispatchState {
+	args := baseArgs.Clone()
+	args.ResetAllowed()
+	return &dispatchState{runConfig: &container.Config{}, buildArgs: args}
+}
+
+type stagesBuildResults struct {
+	flat    []*container.Config
+	indexed map[string]*container.Config
+}
+
+func newStagesBuildResults() *stagesBuildResults {
+	return &stagesBuildResults{
+		indexed: make(map[string]*container.Config),
+	}
+}
+
+func (r *stagesBuildResults) getByName(name string) (*container.Config, bool) {
+	c, ok := r.indexed[strings.ToLower(name)]
+	return c, ok
+}
+
+func (r *stagesBuildResults) validateIndex(i int) error {
+	if i == len(r.flat) {
+		return errors.New("refers to current build stage")
+	}
+	if i < 0 || i > len(r.flat) {
+		return errors.New("index out of bounds")
+	}
+	return nil
+}
+
+func (r *stagesBuildResults) get(nameOrIndex string) (*container.Config, error) {
+	if c, ok := r.getByName(nameOrIndex); ok {
+		return c, nil
+	}
+	ix, err := strconv.ParseInt(nameOrIndex, 10, 0)
+	if err != nil {
+		return nil, nil
+	}
+	if err := r.validateIndex(int(ix)); err != nil {
+		return nil, err
+	}
+	return r.flat[ix], nil
+}
+
+func (r *stagesBuildResults) checkStageNameAvailable(name string) error {
+	if name != "" {
+		if _, ok := r.getByName(name); ok {
+			return errors.Errorf("%s stage name already used", name)
+		}
+	}
+	return nil
+}
+
+func (r *stagesBuildResults) commitStage(name string, config *container.Config) error {
+	if name != "" {
+		if _, ok := r.getByName(name); ok {
+			return errors.Errorf("%s stage name already used", name)
+		}
+		r.indexed[strings.ToLower(name)] = config
+	}
+	r.flat = append(r.flat, config)
+	return nil
+}
+
+func commitStage(state *dispatchState, stages *stagesBuildResults) error {
+	return stages.commitStage(state.stageName, state.runConfig)
+}
+
+type dispatchRequest struct {
+	state   *dispatchState
+	shlex   *ShellLex
+	builder *Builder
+	source  builder.Source
+	stages  *stagesBuildResults
+}
+
+func newDispatchRequest(builder *Builder, escapeToken rune, source builder.Source, buildArgs *buildArgs, stages *stagesBuildResults) dispatchRequest {
+	return dispatchRequest{
+		state:   newDispatchState(buildArgs),
+		shlex:   NewShellLex(escapeToken),
+		builder: builder,
+		source:  source,
+		stages:  stages,
+	}
 }
 
 func (s *dispatchState) updateRunConfig() {
@@ -208,120 +209,30 @@
 	return s.imageID != "" || (s.baseImage != nil && s.baseImage.ImageID() == "")
 }
 
-func (s *dispatchState) isCurrentStage(target string) bool {
-	if target == "" {
-		return false
-	}
-	return strings.EqualFold(s.stageName, target)
-}
-
 func (s *dispatchState) beginStage(stageName string, image builder.Image) {
 	s.stageName = stageName
 	s.imageID = image.ImageID()
 
 	if image.RunConfig() != nil {
-		s.runConfig = image.RunConfig()
+		s.runConfig = copyRunConfig(image.RunConfig()) // copy avoids referencing the same instance when 2 stages have the same base
 	} else {
 		s.runConfig = &container.Config{}
 	}
 	s.baseImage = image
 	s.setDefaultPath()
+	s.runConfig.OpenStdin = false
+	s.runConfig.StdinOnce = false
 }
 
-// Add the default PATH to runConfig.ENV if one exists for the platform and there
+// Add the default PATH to runConfig.ENV if one exists for the operating system and there
 // is no PATH set. Note that Windows containers on Windows won't have one as it's set by HCS
 func (s *dispatchState) setDefaultPath() {
-	// TODO @jhowardmsft LCOW Support - This will need revisiting later
-	platform := runtime.GOOS
-	if system.LCOWSupported() {
-		platform = "linux"
-	}
-	if system.DefaultPathEnv(platform) == "" {
+	defaultPath := system.DefaultPathEnv(s.baseImage.OperatingSystem())
+	if defaultPath == "" {
 		return
 	}
 	envMap := opts.ConvertKVStringsToMap(s.runConfig.Env)
 	if _, ok := envMap["PATH"]; !ok {
-		s.runConfig.Env = append(s.runConfig.Env, "PATH="+system.DefaultPathEnv(platform))
+		s.runConfig.Env = append(s.runConfig.Env, "PATH="+defaultPath)
 	}
 }
-
-func handleOnBuildNode(ast *parser.Node, msg *bytes.Buffer) (*parser.Node, []string, error) {
-	if ast.Next == nil {
-		return nil, nil, validationError{errors.New("ONBUILD requires at least one argument")}
-	}
-	ast = ast.Next.Children[0]
-	msg.WriteString(" " + ast.Value + formatFlags(ast.Flags))
-	return ast, []string{ast.Value}, nil
-}
-
-func formatFlags(flags []string) string {
-	if len(flags) > 0 {
-		return " " + strings.Join(flags, " ")
-	}
-	return ""
-}
-
-func getDispatchArgsFromNode(ast *parser.Node, processFunc processWordFunc, msg *bytes.Buffer) ([]string, error) {
-	args := []string{}
-	for i := 0; ast.Next != nil; i++ {
-		ast = ast.Next
-		words, err := processFunc(ast.Value)
-		if err != nil {
-			return nil, err
-		}
-		args = append(args, words...)
-		msg.WriteString(" " + ast.Value)
-	}
-	return args, nil
-}
-
-type processWordFunc func(string) ([]string, error)
-
-func createProcessWordFunc(shlex *ShellLex, cmd string, envs []string) processWordFunc {
-	switch {
-	case !replaceEnvAllowed[cmd]:
-		return func(word string) ([]string, error) {
-			return []string{word}, nil
-		}
-	case allowWordExpansion[cmd]:
-		return func(word string) ([]string, error) {
-			return shlex.ProcessWords(word, envs)
-		}
-	default:
-		return func(word string) ([]string, error) {
-			word, err := shlex.ProcessWord(word, envs)
-			return []string{word}, err
-		}
-	}
-}
-
-// checkDispatch does a simple check for syntax errors of the Dockerfile.
-// Because some of the instructions can only be validated through runtime,
-// arg, env, etc., this syntax check will not be complete and could not replace
-// the runtime check. Instead, this function is only a helper that allows
-// user to find out the obvious error in Dockerfile earlier on.
-func checkDispatch(ast *parser.Node) error {
-	cmd := ast.Value
-	upperCasedCmd := strings.ToUpper(cmd)
-
-	// To ensure the user is given a decent error message if the platform
-	// on which the daemon is running does not support a builder command.
-	if err := platformSupports(strings.ToLower(cmd)); err != nil {
-		return err
-	}
-
-	// The instruction itself is ONBUILD, we will make sure it follows with at
-	// least one argument
-	if upperCasedCmd == "ONBUILD" {
-		if ast.Next == nil {
-			buildsFailed.WithValues(metricsMissingOnbuildArgumentsError).Inc()
-			return errors.New("ONBUILD requires at least one argument")
-		}
-	}
-
-	if _, ok := evaluateTable[cmd]; ok {
-		return nil
-	}
-	buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
-	return errors.Errorf("unknown instruction: %s", upperCasedCmd)
-}
diff --git a/builder/dockerfile/evaluator_test.go b/builder/dockerfile/evaluator_test.go
index 72d7ce1..fc5512d 100644
--- a/builder/dockerfile/evaluator_test.go
+++ b/builder/dockerfile/evaluator_test.go
@@ -1,21 +1,19 @@
 package dockerfile
 
 import (
-	"io/ioutil"
-	"strings"
 	"testing"
 
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/builder/dockerfile/parser"
+	"github.com/docker/docker/builder/dockerfile/instructions"
 	"github.com/docker/docker/builder/remotecontext"
+	"github.com/docker/docker/internal/testutil"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/reexec"
 )
 
 type dispatchTestCase struct {
-	name, dockerfile, expectedError string
-	files                           map[string]string
+	name, expectedError string
+	cmd                 instructions.Command
+	files               map[string]string
 }
 
 func init() {
@@ -23,108 +21,73 @@
 }
 
 func initDispatchTestCases() []dispatchTestCase {
-	dispatchTestCases := []dispatchTestCase{{
-		name: "copyEmptyWhitespace",
-		dockerfile: `COPY
-	quux \
-      bar`,
-		expectedError: "COPY requires at least two arguments",
-	},
+	dispatchTestCases := []dispatchTestCase{
 		{
-			name:          "ONBUILD forbidden FROM",
-			dockerfile:    "ONBUILD FROM scratch",
-			expectedError: "FROM isn't allowed as an ONBUILD trigger",
-			files:         nil,
-		},
-		{
-			name:          "ONBUILD forbidden MAINTAINER",
-			dockerfile:    "ONBUILD MAINTAINER docker.io",
-			expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger",
-			files:         nil,
-		},
-		{
-			name:          "ARG two arguments",
-			dockerfile:    "ARG foo bar",
-			expectedError: "ARG requires exactly one argument",
-			files:         nil,
-		},
-		{
-			name:          "MAINTAINER unknown flag",
-			dockerfile:    "MAINTAINER --boo joe@example.com",
-			expectedError: "Unknown flag: boo",
-			files:         nil,
-		},
-		{
-			name:          "ADD multiple files to file",
-			dockerfile:    "ADD file1.txt file2.txt test",
+			name: "ADD multiple files to file",
+			cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{
+				"file1.txt",
+				"file2.txt",
+				"test",
+			}},
 			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
 			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
 		},
 		{
-			name:          "JSON ADD multiple files to file",
-			dockerfile:    `ADD ["file1.txt", "file2.txt", "test"]`,
+			name: "Wildcard ADD multiple files to file",
+			cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{
+				"file*.txt",
+				"test",
+			}},
 			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
 			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
 		},
 		{
-			name:          "Wildcard ADD multiple files to file",
-			dockerfile:    "ADD file*.txt test",
-			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
-			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
-		},
-		{
-			name:          "Wildcard JSON ADD multiple files to file",
-			dockerfile:    `ADD ["file*.txt", "test"]`,
-			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
-			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
-		},
-		{
-			name:          "COPY multiple files to file",
-			dockerfile:    "COPY file1.txt file2.txt test",
+			name: "COPY multiple files to file",
+			cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{
+				"file1.txt",
+				"file2.txt",
+				"test",
+			}},
 			expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /",
 			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
 		},
 		{
-			name:          "JSON COPY multiple files to file",
-			dockerfile:    `COPY ["file1.txt", "file2.txt", "test"]`,
-			expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /",
-			files:         map[string]string{"file1.txt": "test1", "file2.txt": "test2"},
-		},
-		{
-			name:          "ADD multiple files to file with whitespace",
-			dockerfile:    `ADD [ "test file1.txt", "test file2.txt", "test" ]`,
+			name: "ADD multiple files to file with whitespace",
+			cmd: &instructions.AddCommand{SourcesAndDest: instructions.SourcesAndDest{
+				"test file1.txt",
+				"test file2.txt",
+				"test",
+			}},
 			expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /",
 			files:         map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"},
 		},
 		{
-			name:          "COPY multiple files to file with whitespace",
-			dockerfile:    `COPY [ "test file1.txt", "test file2.txt", "test" ]`,
+			name: "COPY multiple files to file with whitespace",
+			cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{
+				"test file1.txt",
+				"test file2.txt",
+				"test",
+			}},
 			expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /",
 			files:         map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"},
 		},
 		{
-			name:          "COPY wildcard no files",
-			dockerfile:    `COPY file*.txt /tmp/`,
+			name: "COPY wildcard no files",
+			cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{
+				"file*.txt",
+				"/tmp/",
+			}},
 			expectedError: "COPY failed: no source files were specified",
 			files:         nil,
 		},
 		{
-			name:          "COPY url",
-			dockerfile:    `COPY https://index.docker.io/robots.txt /`,
+			name: "COPY url",
+			cmd: &instructions.CopyCommand{SourcesAndDest: instructions.SourcesAndDest{
+				"https://index.docker.io/robots.txt",
+				"/",
+			}},
 			expectedError: "source can't be a URL for COPY",
 			files:         nil,
-		},
-		{
-			name:          "Chaining ONBUILD",
-			dockerfile:    `ONBUILD ONBUILD RUN touch foobar`,
-			expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed",
-			files:         nil,
-		},
-		{
-			name:          "Invalid instruction",
-			dockerfile:    `foo bar`,
-			expectedError: "unknown instruction: FOO",
-			files:         nil,
 		}}
 
 	return dispatchTestCases
@@ -170,41 +133,8 @@
 		}
 	}()
 
-	r := strings.NewReader(testCase.dockerfile)
-	result, err := parser.Parse(r)
-
-	if err != nil {
-		t.Fatalf("Error when parsing Dockerfile: %s", err)
-	}
-
-	options := &types.ImageBuildOptions{
-		BuildArgs: make(map[string]*string),
-	}
-
-	b := &Builder{
-		options:   options,
-		Stdout:    ioutil.Discard,
-		buildArgs: newBuildArgs(options.BuildArgs),
-	}
-
-	shlex := NewShellLex(parser.DefaultEscapeToken)
-	n := result.AST
-	state := &dispatchState{runConfig: &container.Config{}}
-	opts := dispatchOptions{
-		state:   state,
-		stepMsg: formatStep(0, len(n.Children)),
-		node:    n.Children[0],
-		shlex:   shlex,
-		source:  context,
-	}
-	state, err = b.dispatch(opts)
-
-	if err == nil {
-		t.Fatalf("No error when executing test %s", testCase.name)
-	}
-
-	if !strings.Contains(err.Error(), testCase.expectedError) {
-		t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", testCase.expectedError, err.Error())
-	}
-
+	b := newBuilderWithMockBackend()
+	sb := newDispatchRequest(b, '`', context, newBuildArgs(make(map[string]*string)), newStagesBuildResults())
+	err = dispatch(sb, testCase.cmd)
+	testutil.ErrorContains(t, err, testCase.expectedError)
 }
diff --git a/builder/dockerfile/evaluator_unix.go b/builder/dockerfile/evaluator_unix.go
deleted file mode 100644
index 28fd5b1..0000000
--- a/builder/dockerfile/evaluator_unix.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !windows
-
-package dockerfile
-
-// platformSupports is a short-term function to give users a quality error
-// message if a Dockerfile uses a command not supported on the platform.
-func platformSupports(command string) error {
-	return nil
-}
diff --git a/builder/dockerfile/evaluator_windows.go b/builder/dockerfile/evaluator_windows.go
deleted file mode 100644
index 72483a2..0000000
--- a/builder/dockerfile/evaluator_windows.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package dockerfile
-
-import "fmt"
-
-// platformSupports is gives users a quality error message if a Dockerfile uses
-// a command not supported on the platform.
-func platformSupports(command string) error {
-	switch command {
-	case "stopsignal":
-		return fmt.Errorf("The daemon on this platform does not support the command '%s'", command)
-	}
-	return nil
-}
diff --git a/builder/dockerfile/imagecontext.go b/builder/dockerfile/imagecontext.go
index fedad6f..a22b60b 100644
--- a/builder/dockerfile/imagecontext.go
+++ b/builder/dockerfile/imagecontext.go
@@ -1,91 +1,18 @@
 package dockerfile
 
 import (
-	"strconv"
-	"strings"
+	"runtime"
 
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builder/remotecontext"
 	dockerimage "github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/system"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
 )
 
-type buildStage struct {
-	id string
-}
-
-func newBuildStage(imageID string) *buildStage {
-	return &buildStage{id: imageID}
-}
-
-func (b *buildStage) ImageID() string {
-	return b.id
-}
-
-func (b *buildStage) update(imageID string) {
-	b.id = imageID
-}
-
-// buildStages tracks each stage of a build so they can be retrieved by index
-// or by name.
-type buildStages struct {
-	sequence []*buildStage
-	byName   map[string]*buildStage
-}
-
-func newBuildStages() *buildStages {
-	return &buildStages{byName: make(map[string]*buildStage)}
-}
-
-func (s *buildStages) getByName(name string) (*buildStage, bool) {
-	stage, ok := s.byName[strings.ToLower(name)]
-	return stage, ok
-}
-
-func (s *buildStages) get(indexOrName string) (*buildStage, error) {
-	index, err := strconv.Atoi(indexOrName)
-	if err == nil {
-		if err := s.validateIndex(index); err != nil {
-			return nil, err
-		}
-		return s.sequence[index], nil
-	}
-	if im, ok := s.byName[strings.ToLower(indexOrName)]; ok {
-		return im, nil
-	}
-	return nil, nil
-}
-
-func (s *buildStages) validateIndex(i int) error {
-	if i < 0 || i >= len(s.sequence)-1 {
-		if i == len(s.sequence)-1 {
-			return errors.New("refers to current build stage")
-		}
-		return errors.New("index out of bounds")
-	}
-	return nil
-}
-
-func (s *buildStages) add(name string, image builder.Image) error {
-	stage := newBuildStage(image.ImageID())
-	name = strings.ToLower(name)
-	if len(name) > 0 {
-		if _, ok := s.byName[name]; ok {
-			return errors.Errorf("duplicate name %s", name)
-		}
-		s.byName[name] = stage
-	}
-	s.sequence = append(s.sequence, stage)
-	return nil
-}
-
-func (s *buildStages) update(imageID string) {
-	s.sequence[len(s.sequence)-1].update(imageID)
-}
-
 type getAndMountFunc func(string, bool) (builder.Image, builder.ReleaseableLayer, error)
 
 // imageSources mounts images and provides a cache for mounted images. It tracks
@@ -94,11 +21,8 @@
 	byImageID map[string]*imageMount
 	mounts    []*imageMount
 	getImage  getAndMountFunc
-	cache     pathCache // TODO: remove
 }
 
-// TODO @jhowardmsft LCOW Support: Eventually, platform can be moved to options.Options.Platform,
-// and removed from builderOptions, but that can't be done yet as it would affect the API.
 func newImageSources(ctx context.Context, options builderOptions) *imageSources {
 	getAndMount := func(idOrRef string, localOnly bool) (builder.Image, builder.ReleaseableLayer, error) {
 		pullOption := backend.PullOptionNoPull
@@ -109,11 +33,12 @@
 				pullOption = backend.PullOptionPreferLocal
 			}
 		}
+		optionsPlatform := system.ParsePlatform(options.Options.Platform)
 		return options.Backend.GetImageAndReleasableLayer(ctx, idOrRef, backend.GetImageAndLayerOptions{
 			PullOption: pullOption,
 			AuthConfig: options.Options.AuthConfigs,
 			Output:     options.ProgressWriter.Output,
-			Platform:   options.Platform,
+			OS:         optionsPlatform.OS,
 		})
 	}
 
@@ -150,7 +75,13 @@
 func (m *imageSources) Add(im *imageMount) {
 	switch im.image {
 	case nil:
-		im.image = &dockerimage.Image{}
+		// set the OS for scratch images
+		os := runtime.GOOS
+		// Windows does not support scratch except for LCOW
+		if runtime.GOOS == "windows" {
+			os = "linux"
+		}
+		im.image = &dockerimage.Image{V1Image: dockerimage.V1Image{OS: os}}
 	default:
 		m.byImageID[im.image.ImageID()] = im
 	}
diff --git a/builder/dockerfile/instructions/bflag.go b/builder/dockerfile/instructions/bflag.go
new file mode 100644
index 0000000..7a81e3c
--- /dev/null
+++ b/builder/dockerfile/instructions/bflag.go
@@ -0,0 +1,183 @@
+package instructions
+
+import (
+	"fmt"
+	"strings"
+)
+
+// FlagType is the type of the build flag
+type FlagType int
+
+const (
+	boolType FlagType = iota
+	stringType
+)
+
+// BFlags contains all flags information for the builder
+type BFlags struct {
+	Args  []string // actual flags/args from cmd line
+	flags map[string]*Flag
+	used  map[string]*Flag
+	Err   error
+}
+
+// Flag contains all information for a flag
+type Flag struct {
+	bf       *BFlags
+	name     string
+	flagType FlagType
+	Value    string
+}
+
+// NewBFlags returns the new BFlags struct
+func NewBFlags() *BFlags {
+	return &BFlags{
+		flags: make(map[string]*Flag),
+		used:  make(map[string]*Flag),
+	}
+}
+
+// NewBFlagsWithArgs returns the new BFlags struct with Args set to args
+func NewBFlagsWithArgs(args []string) *BFlags {
+	flags := NewBFlags()
+	flags.Args = args
+	return flags
+}
+
+// AddBool adds a bool flag to BFlags
+// Note, any error will be generated when Parse() is called (see Parse).
+func (bf *BFlags) AddBool(name string, def bool) *Flag {
+	flag := bf.addFlag(name, boolType)
+	if flag == nil {
+		return nil
+	}
+	if def {
+		flag.Value = "true"
+	} else {
+		flag.Value = "false"
+	}
+	return flag
+}
+
+// AddString adds a string flag to BFlags
+// Note, any error will be generated when Parse() is called (see Parse).
+func (bf *BFlags) AddString(name string, def string) *Flag {
+	flag := bf.addFlag(name, stringType)
+	if flag == nil {
+		return nil
+	}
+	flag.Value = def
+	return flag
+}
+
+// addFlag is a generic func used by the other AddXXX() func
+// to add a new flag to the BFlags struct.
+// Note, any error will be generated when Parse() is called (see Parse).
+func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag {
+	if _, ok := bf.flags[name]; ok {
+		bf.Err = fmt.Errorf("Duplicate flag defined: %s", name)
+		return nil
+	}
+
+	newFlag := &Flag{
+		bf:       bf,
+		name:     name,
+		flagType: flagType,
+	}
+	bf.flags[name] = newFlag
+
+	return newFlag
+}
+
+// IsUsed checks if the flag is used
+func (fl *Flag) IsUsed() bool {
+	if _, ok := fl.bf.used[fl.name]; ok {
+		return true
+	}
+	return false
+}
+
+// IsTrue checks if a bool flag is true
+func (fl *Flag) IsTrue() bool {
+	if fl.flagType != boolType {
+		// Should never get here
+		panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name))
+	}
+	return fl.Value == "true"
+}
+
+// Parse parses and checks if the BFlags is valid.
+// Any error noticed during the AddXXX() funcs will be generated/returned
+// here.  We do this because an error during AddXXX() is more like a
+// compile time error so it doesn't matter too much when we stop our
+// processing as long as we do stop it, so this allows the code
+// around AddXXX() to be just:
+//     defFlag := AddString("description", "")
+// w/o needing to add an if-statement around each one.
+func (bf *BFlags) Parse() error {
+	// If there was an error while defining the possible flags
+	// go ahead and bubble it back up here since we didn't do it
+	// earlier in the processing
+	if bf.Err != nil {
+		return fmt.Errorf("Error setting up flags: %s", bf.Err)
+	}
+
+	for _, arg := range bf.Args {
+		if !strings.HasPrefix(arg, "--") {
+			return fmt.Errorf("Arg should start with -- : %s", arg)
+		}
+
+		if arg == "--" {
+			return nil
+		}
+
+		arg = arg[2:]
+		value := ""
+
+		index := strings.Index(arg, "=")
+		if index >= 0 {
+			value = arg[index+1:]
+			arg = arg[:index]
+		}
+
+		flag, ok := bf.flags[arg]
+		if !ok {
+			return fmt.Errorf("Unknown flag: %s", arg)
+		}
+
+		if _, ok = bf.used[arg]; ok {
+			return fmt.Errorf("Duplicate flag specified: %s", arg)
+		}
+
+		bf.used[arg] = flag
+
+		switch flag.flagType {
+		case boolType:
+			// value == "" is only ok if no "=" was specified
+			if index >= 0 && value == "" {
+				return fmt.Errorf("Missing a value on flag: %s", arg)
+			}
+
+			lower := strings.ToLower(value)
+			if lower == "" {
+				flag.Value = "true"
+			} else if lower == "true" || lower == "false" {
+				flag.Value = lower
+			} else {
+				return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value)
+			}
+
+		case stringType:
+			if index < 0 {
+				return fmt.Errorf("Missing a value on flag: %s", arg)
+			}
+			flag.Value = value
+
+		default:
+			panic("No idea what kind of flag we have! Should never get here!")
+		}
+
+	}
+
+	return nil
+}
diff --git a/builder/dockerfile/instructions/bflag_test.go b/builder/dockerfile/instructions/bflag_test.go
new file mode 100644
index 0000000..b194ba7
--- /dev/null
+++ b/builder/dockerfile/instructions/bflag_test.go
@@ -0,0 +1,187 @@
+package instructions
+
+import (
+	"testing"
+)
+
+func TestBuilderFlags(t *testing.T) {
+	var expected string
+	var err error
+
+	// ---
+
+	bf := NewBFlags()
+	bf.Args = []string{}
+	if err := bf.Parse(); err != nil {
+		t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	bf.Args = []string{"--"}
+	if err := bf.Parse(); err != nil {
+		t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 := bf.AddString("str1", "")
+	flBool1 := bf.AddBool("bool1", false)
+	bf.Args = []string{}
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.IsUsed() {
+		t.Fatal("Test3 - str1 was not used!")
+	}
+	if flBool1.IsUsed() {
+		t.Fatal("Test3 - bool1 was not used!")
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.Value != "HI" {
+		t.Fatal("Str1 was supposed to default to: HI")
+	}
+	if flBool1.IsTrue() {
+		t.Fatal("Bool1 was supposed to default to: false")
+	}
+	if flStr1.IsUsed() {
+		t.Fatal("Str1 was not used!")
+	}
+	if flBool1.IsUsed() {
+		t.Fatal("Bool1 was not used!")
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--str1"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--str1="}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	expected = ""
+	if flStr1.Value != expected {
+		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	bf.Args = []string{"--str1=BYE"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	expected = "BYE"
+	if flStr1.Value != expected {
+		t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if !flBool1.IsTrue() {
+		t.Fatal("Test-b1 Bool1 was supposed to be true")
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1=true"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if !flBool1.IsTrue() {
+		t.Fatal("Test-b2 Bool1 was supposed to be true")
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1=false"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flBool1.IsTrue() {
+		t.Fatal("Test-b3 Bool1 was supposed to be false")
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1=false1"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool2"}
+
+	if err = bf.Parse(); err == nil {
+		t.Fatalf("Test %q was supposed to fail", bf.Args)
+	}
+
+	// ---
+
+	bf = NewBFlags()
+	flStr1 = bf.AddString("str1", "HI")
+	flBool1 = bf.AddBool("bool1", false)
+	bf.Args = []string{"--bool1", "--str1=BYE"}
+
+	if err = bf.Parse(); err != nil {
+		t.Fatalf("Test %q was supposed to work: %s", bf.Args, err)
+	}
+
+	if flStr1.Value != "BYE" {
+		t.Fatalf("Test %s, str1 should be BYE", bf.Args)
+	}
+	if !flBool1.IsTrue() {
+		t.Fatalf("Test %s, bool1 should be true", bf.Args)
+	}
+}
diff --git a/builder/dockerfile/instructions/commands.go b/builder/dockerfile/instructions/commands.go
new file mode 100644
index 0000000..0863e6b
--- /dev/null
+++ b/builder/dockerfile/instructions/commands.go
@@ -0,0 +1,396 @@
+package instructions
+
+import (
+	"errors"
+
+	"strings"
+
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/strslice"
+)
+
+// KeyValuePair represents an arbitrary named value (useful in a slice instead of a map[string]string to preserve ordering)
+type KeyValuePair struct {
+	Key   string
+	Value string
+}
+
+// String returns the pair in "key=value" form.
+func (kvp *KeyValuePair) String() string {
+	return kvp.Key + "=" + kvp.Value
+}
+
+// Command is implemented by every command present in a dockerfile
+type Command interface {
+	Name() string
+}
+
+// KeyValuePairs is a slice of KeyValuePair
+type KeyValuePairs []KeyValuePair
+
+// withNameAndCode is the base of every command in a Dockerfile (String() returns its source code)
+type withNameAndCode struct {
+	code string // trimmed original source line(s) of the instruction
+	name string // the instruction keyword this command was parsed from
+}
+
+// String returns the original source code of the instruction.
+func (c *withNameAndCode) String() string {
+	return c.code
+}
+
+// Name of the command
+func (c *withNameAndCode) Name() string {
+	return c.name
+}
+
+// newWithNameAndCode builds the shared command base from a parse request.
+func newWithNameAndCode(req parseRequest) withNameAndCode {
+	return withNameAndCode{code: strings.TrimSpace(req.original), name: req.command}
+}
+
+// SingleWordExpander is a provider for variable expansion where 1 word => 1 output
+type SingleWordExpander func(word string) (string, error)
+
+// SupportsSingleWordExpansion interface marks a command as supporting variable expansion
+type SupportsSingleWordExpansion interface {
+	Expand(expander SingleWordExpander) error
+}
+
+// PlatformSpecific adds platform checks to a command
+type PlatformSpecific interface {
+	CheckPlatform(platform string) error
+}
+
+// expandKvp expands variables in both the key and the value of a single pair,
+// returning a new pair; the zero pair is returned on expansion error.
+func expandKvp(kvp KeyValuePair, expander SingleWordExpander) (KeyValuePair, error) {
+	key, err := expander(kvp.Key)
+	if err != nil {
+		return KeyValuePair{}, err
+	}
+	value, err := expander(kvp.Value)
+	if err != nil {
+		return KeyValuePair{}, err
+	}
+	return KeyValuePair{Key: key, Value: value}, nil
+}
+// expandKvpsInPlace expands every pair of kvps, mutating the slice in place;
+// it stops at (and returns) the first expansion error.
+func expandKvpsInPlace(kvps KeyValuePairs, expander SingleWordExpander) error {
+	for i, kvp := range kvps {
+		newKvp, err := expandKvp(kvp, expander)
+		if err != nil {
+			return err
+		}
+		kvps[i] = newKvp
+	}
+	return nil
+}
+
+// expandSliceInPlace expands every element of values, mutating the slice in
+// place; it stops at (and returns) the first expansion error.
+func expandSliceInPlace(values []string, expander SingleWordExpander) error {
+	for i, v := range values {
+		newValue, err := expander(v)
+		if err != nil {
+			return err
+		}
+		values[i] = newValue
+	}
+	return nil
+}
+
+// EnvCommand : ENV key1 value1 [keyN valueN...]
+type EnvCommand struct {
+	withNameAndCode
+	Env KeyValuePairs // kvp slice instead of map to preserve ordering
+}
+
+// Expand variables in both keys and values of the ENV pairs.
+func (c *EnvCommand) Expand(expander SingleWordExpander) error {
+	return expandKvpsInPlace(c.Env, expander)
+}
+
+// MaintainerCommand : MAINTAINER maintainer_name
+type MaintainerCommand struct {
+	withNameAndCode
+	Maintainer string
+}
+
+// LabelCommand : LABEL some json data describing the image
+//
+// Sets the Label variable foo to bar.
+//
+type LabelCommand struct {
+	withNameAndCode
+	Labels KeyValuePairs // kvp slice instead of map to preserve ordering
+}
+
+// Expand variables in both keys and values of the LABEL pairs.
+func (c *LabelCommand) Expand(expander SingleWordExpander) error {
+	return expandKvpsInPlace(c.Labels, expander)
+}
+
+// SourcesAndDest represents a list of source files and a destination
+type SourcesAndDest []string
+
+// Sources list the source paths (every element except the last).
+// NOTE(review): assumes len(s) >= 1 — the ADD/COPY parsers reject fewer than
+// two arguments, but calling this on an empty slice would panic.
+func (s SourcesAndDest) Sources() []string {
+	res := make([]string, len(s)-1)
+	copy(res, s[:len(s)-1])
+	return res
+}
+
+// Dest path of the operation (the last element).
+func (s SourcesAndDest) Dest() string {
+	return s[len(s)-1]
+}
+
+// AddCommand : ADD foo /path
+//
+// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling
+// exist here. If you do not wish to have this automatic handling, use COPY.
+//
+type AddCommand struct {
+	withNameAndCode
+	SourcesAndDest
+	Chown string // value of the --chown flag ("" when not given)
+}
+
+// Expand variables in every source path and in the destination.
+func (c *AddCommand) Expand(expander SingleWordExpander) error {
+	return expandSliceInPlace(c.SourcesAndDest, expander)
+}
+
+// CopyCommand : COPY foo /path
+//
+// Same as 'ADD' but without the tar and remote url handling.
+//
+type CopyCommand struct {
+	withNameAndCode
+	SourcesAndDest
+	From  string // value of the --from flag ("" when not given)
+	Chown string // value of the --chown flag ("" when not given)
+}
+
+// Expand variables in every source path and in the destination.
+func (c *CopyCommand) Expand(expander SingleWordExpander) error {
+	return expandSliceInPlace(c.SourcesAndDest, expander)
+}
+
+// OnbuildCommand : ONBUILD <some other command>
+type OnbuildCommand struct {
+	withNameAndCode
+	Expression string // the trigger instruction with the ONBUILD prefix stripped
+}
+
+// WorkdirCommand : WORKDIR /tmp
+//
+// Set the working directory for future RUN/CMD/etc statements.
+//
+type WorkdirCommand struct {
+	withNameAndCode
+	Path string
+}
+
+// Expand variables in the workdir path.
+func (c *WorkdirCommand) Expand(expander SingleWordExpander) error {
+	p, err := expander(c.Path)
+	if err != nil {
+		return err
+	}
+	c.Path = p
+	return nil
+}
+
+// ShellDependantCmdLine represents a cmdline optionally prepended with the shell
+type ShellDependantCmdLine struct {
+	CmdLine      strslice.StrSlice
+	PrependShell bool // true when the instruction used shell (non-JSON) form
+}
+
+// RunCommand : RUN some command yo
+//
+// run a command and commit the image. Args are automatically prepended with
+// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under
+// Windows, in the event there is only one argument The difference in processing:
+//
+// RUN echo hi          # sh -c echo hi       (Linux)
+// RUN echo hi          # cmd /S /C echo hi   (Windows)
+// RUN [ "echo", "hi" ] # echo hi
+//
+type RunCommand struct {
+	withNameAndCode
+	ShellDependantCmdLine
+}
+
+// CmdCommand : CMD foo
+//
+// Set the default command to run in the container (which may be empty).
+// Argument handling is the same as RUN.
+//
+type CmdCommand struct {
+	withNameAndCode
+	ShellDependantCmdLine
+}
+
+// HealthCheckCommand : HEALTHCHECK foo
+//
+// Set the default healthcheck command to run in the container (which may be empty).
+// Argument handling is the same as RUN.
+//
+type HealthCheckCommand struct {
+	withNameAndCode
+	Health *container.HealthConfig // parsed Test/Interval/Timeout/StartPeriod/Retries
+}
+
+// EntrypointCommand : ENTRYPOINT /usr/sbin/nginx
+//
+// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments
+// to /usr/sbin/nginx. Uses the default shell if not in JSON format.
+//
+// Handles command processing similar to CMD and RUN, only req.runConfig.Entrypoint
+// is initialized at newBuilder time instead of through argument parsing.
+//
+type EntrypointCommand struct {
+	withNameAndCode
+	ShellDependantCmdLine
+}
+
+// ExposeCommand : EXPOSE 6667/tcp 7000/tcp
+//
+// Expose ports for links and port mappings. This all ends up in
+// req.runConfig.ExposedPorts for runconfig.
+//
+type ExposeCommand struct {
+	withNameAndCode
+	Ports []string // sorted by the parser
+}
+
+// UserCommand : USER foo
+//
+// Set the user to 'foo' for future commands and when running the
+// ENTRYPOINT/CMD at container run time.
+//
+type UserCommand struct {
+	withNameAndCode
+	User string
+}
+
+// Expand variables in the user name.
+func (c *UserCommand) Expand(expander SingleWordExpander) error {
+	p, err := expander(c.User)
+	if err != nil {
+		return err
+	}
+	c.User = p
+	return nil
+}
+
+// VolumeCommand : VOLUME /foo
+//
+// Expose the volume /foo for use. Will also accept the JSON array form.
+//
+type VolumeCommand struct {
+	withNameAndCode
+	Volumes []string
+}
+
+// Expand variables in every volume path.
+func (c *VolumeCommand) Expand(expander SingleWordExpander) error {
+	return expandSliceInPlace(c.Volumes, expander)
+}
+
+// StopSignalCommand : STOPSIGNAL signal
+//
+// Set the signal that will be used to kill the container.
+type StopSignalCommand struct {
+	withNameAndCode
+	Signal string
+}
+
+// Expand variables in the signal name.
+func (c *StopSignalCommand) Expand(expander SingleWordExpander) error {
+	p, err := expander(c.Signal)
+	if err != nil {
+		return err
+	}
+	c.Signal = p
+	return nil
+}
+
+// CheckPlatform checks that the command is supported in the target platform
+func (c *StopSignalCommand) CheckPlatform(platform string) error {
+	if platform == "windows" {
+		return errors.New("The daemon on this platform does not support the command stopsignal")
+	}
+	return nil
+}
+
+// ArgCommand : ARG name[=value]
+//
+// Adds the variable foo to the trusted list of variables that can be passed
+// to builder using the --build-arg flag for expansion/substitution or passing to 'run'.
+// Dockerfile author may optionally set a default value of this variable.
+type ArgCommand struct {
+	withNameAndCode
+	Key   string
+	Value *string // nil when the ARG was declared without a default value
+}
+
+// Expand variables in the key and, when present, the default value.
+func (c *ArgCommand) Expand(expander SingleWordExpander) error {
+	p, err := expander(c.Key)
+	if err != nil {
+		return err
+	}
+	c.Key = p
+	if c.Value != nil {
+		p, err = expander(*c.Value)
+		if err != nil {
+			return err
+		}
+		c.Value = &p
+	}
+	return nil
+}
+
+// ShellCommand : SHELL powershell -command
+//
+// Set the non-default shell to use.
+type ShellCommand struct {
+	withNameAndCode
+	Shell strslice.StrSlice
+}
+
+// Stage represents a single stage in a multi-stage build
+type Stage struct {
+	Name       string    // the name given with FROM ... AS <name> ("" when anonymous)
+	Commands   []Command // commands belonging to this stage, in source order
+	BaseName   string    // the base image reference from FROM
+	SourceCode string    // the trimmed FROM line
+}
+
+// AddCommand appends cmd to the stage
+func (s *Stage) AddCommand(cmd Command) {
+	// todo: validate cmd type
+	s.Commands = append(s.Commands, cmd)
+}
+
+// IsCurrentStage checks if the stage name is the current (last) stage
+func IsCurrentStage(s []Stage, name string) bool {
+	if len(s) == 0 {
+		return false
+	}
+	return s[len(s)-1].Name == name
+}
+
+// CurrentStage returns the last stage in a slice, or an error when the slice is empty
+func CurrentStage(s []Stage) (*Stage, error) {
+	if len(s) == 0 {
+		return nil, errors.New("No build stage in current context")
+	}
+	return &s[len(s)-1], nil
+}
+
+// HasStage looks for the presence of a given stage name and returns its
+// index, or (-1, false) when absent
+func HasStage(s []Stage, name string) (int, bool) {
+	for i, stage := range s {
+		if stage.Name == name {
+			return i, true
+		}
+	}
+	return -1, false
+}
diff --git a/builder/dockerfile/instructions/errors_unix.go b/builder/dockerfile/instructions/errors_unix.go
new file mode 100644
index 0000000..0b03b34
--- /dev/null
+++ b/builder/dockerfile/instructions/errors_unix.go
@@ -0,0 +1,9 @@
+// +build !windows
+
+package instructions
+
+import "fmt"
+
+// errNotJSON reports that command requires its arguments in JSON-array form.
+// The second parameter (the original source line) is only used by the
+// Windows variant of this function.
+func errNotJSON(command, _ string) error {
+	return fmt.Errorf("%s requires the arguments to be in JSON form", command)
+}
diff --git a/builder/dockerfile/instructions/errors_windows.go b/builder/dockerfile/instructions/errors_windows.go
new file mode 100644
index 0000000..a4843c5
--- /dev/null
+++ b/builder/dockerfile/instructions/errors_windows.go
@@ -0,0 +1,27 @@
+package instructions
+
+import (
+	"fmt"
+	"path/filepath"
+	"regexp"
+	"strings"
+)
+
+// errNotJSON reports that command requires its arguments in JSON-array form,
+// adding a Windows-specific hint when the line looks like an unescaped path.
+func errNotJSON(command, original string) error {
+	// For Windows users, give a hint if it looks like it might contain
+	// a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"],
+	// as JSON must be escaped. Unfortunate...
+	//
+	// Specifically looking for quote-driveletter-colon-backslash, there's no
+	// double backslash and a [] pair. No, this is not perfect, but it doesn't
+	// have to be. It's simply a hint to make life a little easier.
+	extra := ""
+	// NOTE(review): the outer ToLower is redundant — the input to Replace is
+	// already lower-cased, so the replacement result cannot contain upper case.
+	original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1)))
+	if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 &&
+		!strings.Contains(original, `\\`) &&
+		strings.Contains(original, "[") &&
+		strings.Contains(original, "]") {
+		extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original)
+	}
+	return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra)
+}
diff --git a/builder/dockerfile/instructions/parse.go b/builder/dockerfile/instructions/parse.go
new file mode 100644
index 0000000..0f7c69d
--- /dev/null
+++ b/builder/dockerfile/instructions/parse.go
@@ -0,0 +1,635 @@
+package instructions
+
+import (
+	"fmt"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/strslice"
+	"github.com/docker/docker/builder/dockerfile/command"
+	"github.com/docker/docker/builder/dockerfile/parser"
+	"github.com/pkg/errors"
+)
+
+// parseRequest carries everything a parse* helper needs to build a typed
+// command from a single Dockerfile AST node.
+type parseRequest struct {
+	command    string          // the instruction keyword (node.Value)
+	args       []string        // flattened positional arguments
+	attributes map[string]bool // parser attributes, e.g. "json" for exec form
+	flags      *BFlags         // instruction flags (e.g. --chown), not yet parsed
+	original   string          // the raw source line
+}
+
+// nodeArgs flattens an instruction's linked list of argument nodes into a
+// string slice, descending one level into single-child sub-commands.
+func nodeArgs(node *parser.Node) []string {
+	result := []string{}
+	for ; node.Next != nil; node = node.Next {
+		arg := node.Next
+		if len(arg.Children) == 0 {
+			result = append(result, arg.Value)
+		} else if len(arg.Children) == 1 {
+			//sub command
+			result = append(result, arg.Children[0].Value)
+			result = append(result, nodeArgs(arg.Children[0])...)
+		}
+	}
+	return result
+}
+
+// newParseRequestFromNode builds a parseRequest from an AST node.
+func newParseRequestFromNode(node *parser.Node) parseRequest {
+	return parseRequest{
+		command:    node.Value,
+		args:       nodeArgs(node),
+		attributes: node.Attributes,
+		original:   node.Original,
+		flags:      NewBFlagsWithArgs(node.Flags),
+	}
+}
+
+// ParseInstruction converts an AST to a typed instruction (either a command or a build stage beginning when encountering a `FROM` statement)
+func ParseInstruction(node *parser.Node) (interface{}, error) {
+	req := newParseRequestFromNode(node)
+	// dispatch on the instruction keyword; each parse* helper validates its
+	// own argument count and flags
+	switch node.Value {
+	case command.Env:
+		return parseEnv(req)
+	case command.Maintainer:
+		return parseMaintainer(req)
+	case command.Label:
+		return parseLabel(req)
+	case command.Add:
+		return parseAdd(req)
+	case command.Copy:
+		return parseCopy(req)
+	case command.From:
+		return parseFrom(req)
+	case command.Onbuild:
+		return parseOnBuild(req)
+	case command.Workdir:
+		return parseWorkdir(req)
+	case command.Run:
+		return parseRun(req)
+	case command.Cmd:
+		return parseCmd(req)
+	case command.Healthcheck:
+		return parseHealthcheck(req)
+	case command.Entrypoint:
+		return parseEntrypoint(req)
+	case command.Expose:
+		return parseExpose(req)
+	case command.User:
+		return parseUser(req)
+	case command.Volume:
+		return parseVolume(req)
+	case command.StopSignal:
+		return parseStopSignal(req)
+	case command.Arg:
+		return parseArg(req)
+	case command.Shell:
+		return parseShell(req)
+	}
+
+	return nil, &UnknownInstruction{Instruction: node.Value, Line: node.StartLine}
+}
+
+// ParseCommand converts an AST to a typed Command
+func ParseCommand(node *parser.Node) (Command, error) {
+	s, err := ParseInstruction(node)
+	if err != nil {
+		return nil, err
+	}
+	if c, ok := s.(Command); ok {
+		return c, nil
+	}
+	// ParseInstruction produced a non-command result (e.g. a *Stage from FROM)
+	return nil, errors.Errorf("%T is not a command type", s)
+}
+
+// UnknownInstruction represents an error occurring when a command is unresolvable
+type UnknownInstruction struct {
+	Line        int    // 1-based source line of the offending instruction
+	Instruction string // the unrecognized keyword as written
+}
+
+// Error reports the unknown keyword, upper-cased for display.
+func (e *UnknownInstruction) Error() string {
+	return fmt.Sprintf("unknown instruction: %s", strings.ToUpper(e.Instruction))
+}
+
+// IsUnknownInstruction checks if the error is an UnknownInstruction or a parseError containing an UnknownInstruction
+func IsUnknownInstruction(err error) bool {
+	_, ok := err.(*UnknownInstruction)
+	if !ok {
+		var pe *parseError
+		if pe, ok = err.(*parseError); ok {
+			// unwrap one level: Parse wraps instruction errors in *parseError
+			_, ok = pe.inner.(*UnknownInstruction)
+		}
+	}
+	return ok
+}
+
+// parseError decorates an inner error with the line of the offending AST node.
+type parseError struct {
+	inner error
+	node  *parser.Node
+}
+
+func (e *parseError) Error() string {
+	return fmt.Sprintf("Dockerfile parse error line %d: %v", e.node.StartLine, e.inner.Error())
+}
+
+// Parse a docker file into a collection of buildable stages.
+// metaArgs collects the ARG instructions that appear before the first FROM;
+// any other command before a stage exists is an error (via CurrentStage).
+func Parse(ast *parser.Node) (stages []Stage, metaArgs []ArgCommand, err error) {
+	for _, n := range ast.Children {
+		cmd, err := ParseInstruction(n)
+		if err != nil {
+			return nil, nil, &parseError{inner: err, node: n}
+		}
+		if len(stages) == 0 {
+			// meta arg case
+			if a, isArg := cmd.(*ArgCommand); isArg {
+				metaArgs = append(metaArgs, *a)
+				continue
+			}
+		}
+		switch c := cmd.(type) {
+		case *Stage:
+			stages = append(stages, *c)
+		case Command:
+			// attach the command to the most recently opened stage
+			stage, err := CurrentStage(stages)
+			if err != nil {
+				return nil, nil, err
+			}
+			stage.AddCommand(c)
+		default:
+			return nil, nil, errors.Errorf("%T is not a command type", cmd)
+		}
+
+	}
+	return stages, metaArgs, nil
+}
+
+// parseKvps turns a flat [key1, value1, key2, value2, ...] argument list into
+// ordered KeyValuePairs, rejecting empty lists, odd lengths, and blank keys.
+func parseKvps(args []string, cmdName string) (KeyValuePairs, error) {
+	if len(args) == 0 {
+		return nil, errAtLeastOneArgument(cmdName)
+	}
+	if len(args)%2 != 0 {
+		// should never get here, but just in case
+		return nil, errTooManyArguments(cmdName)
+	}
+	var res KeyValuePairs
+	for j := 0; j < len(args); j += 2 {
+		if len(args[j]) == 0 {
+			return nil, errBlankCommandNames(cmdName)
+		}
+		name := args[j]
+		value := args[j+1]
+		res = append(res, KeyValuePair{Key: name, Value: value})
+	}
+	return res, nil
+}
+
+// parseEnv builds an EnvCommand from an ENV instruction's key/value pairs.
+func parseEnv(req parseRequest) (*EnvCommand, error) {
+
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+	envs, err := parseKvps(req.args, "ENV")
+	if err != nil {
+		return nil, err
+	}
+	return &EnvCommand{
+		Env:             envs,
+		withNameAndCode: newWithNameAndCode(req),
+	}, nil
+}
+
+// parseMaintainer builds a MaintainerCommand; MAINTAINER takes exactly one argument.
+func parseMaintainer(req parseRequest) (*MaintainerCommand, error) {
+	if len(req.args) != 1 {
+		return nil, errExactlyOneArgument("MAINTAINER")
+	}
+
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+	return &MaintainerCommand{
+		Maintainer:      req.args[0],
+		withNameAndCode: newWithNameAndCode(req),
+	}, nil
+}
+
+// parseLabel builds a LabelCommand from a LABEL instruction's key/value pairs.
+func parseLabel(req parseRequest) (*LabelCommand, error) {
+
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+
+	labels, err := parseKvps(req.args, "LABEL")
+	if err != nil {
+		return nil, err
+	}
+
+	return &LabelCommand{
+		Labels:          labels,
+		withNameAndCode: newWithNameAndCode(req),
+	}, nil
+}
+
+// parseAdd builds an AddCommand; ADD needs at least one source and a destination.
+func parseAdd(req parseRequest) (*AddCommand, error) {
+	if len(req.args) < 2 {
+		return nil, errNoDestinationArgument("ADD")
+	}
+	flChown := req.flags.AddString("chown", "")
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+	return &AddCommand{
+		SourcesAndDest:  SourcesAndDest(req.args),
+		withNameAndCode: newWithNameAndCode(req),
+		Chown:           flChown.Value,
+	}, nil
+}
+
+// parseCopy builds a CopyCommand; COPY additionally supports --from.
+func parseCopy(req parseRequest) (*CopyCommand, error) {
+	if len(req.args) < 2 {
+		return nil, errNoDestinationArgument("COPY")
+	}
+	flChown := req.flags.AddString("chown", "")
+	flFrom := req.flags.AddString("from", "")
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+	return &CopyCommand{
+		SourcesAndDest:  SourcesAndDest(req.args),
+		From:            flFrom.Value,
+		withNameAndCode: newWithNameAndCode(req),
+		Chown:           flChown.Value,
+	}, nil
+}
+
+// parseFrom opens a new build stage from a FROM instruction.
+func parseFrom(req parseRequest) (*Stage, error) {
+	stageName, err := parseBuildStageName(req.args)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+	code := strings.TrimSpace(req.original)
+
+	return &Stage{
+		BaseName:   req.args[0],
+		Name:       stageName,
+		SourceCode: code,
+		Commands:   []Command{},
+	}, nil
+
+}
+
+// parseBuildStageName extracts the optional stage name from "FROM image [AS name]".
+// The name is lower-cased and must start with a letter, followed only by
+// [a-z0-9-_.]; an anonymous FROM yields "".
+func parseBuildStageName(args []string) (string, error) {
+	stageName := ""
+	switch {
+	case len(args) == 3 && strings.EqualFold(args[1], "as"):
+		stageName = strings.ToLower(args[2])
+		if ok, _ := regexp.MatchString("^[a-z][a-z0-9-_\\.]*$", stageName); !ok {
+			return "", errors.Errorf("invalid name for build stage: %q, name can't start with a number or contain symbols", stageName)
+		}
+	case len(args) != 1:
+		return "", errors.New("FROM requires either one or three arguments")
+	}
+
+	return stageName, nil
+}
+
+// parseOnBuild builds an OnbuildCommand, rejecting triggers that may not be
+// deferred (ONBUILD itself, MAINTAINER, FROM).
+func parseOnBuild(req parseRequest) (*OnbuildCommand, error) {
+	if len(req.args) == 0 {
+		return nil, errAtLeastOneArgument("ONBUILD")
+	}
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+
+	triggerInstruction := strings.ToUpper(strings.TrimSpace(req.args[0]))
+	// NOTE(review): triggerInstruction is already upper-cased above, so this
+	// second ToUpper is redundant.
+	switch strings.ToUpper(triggerInstruction) {
+	case "ONBUILD":
+		return nil, errors.New("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed")
+	case "MAINTAINER", "FROM":
+		return nil, fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction)
+	}
+
+	// strip the leading ONBUILD keyword (case-insensitively) from the source line
+	original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "")
+	return &OnbuildCommand{
+		Expression:      original,
+		withNameAndCode: newWithNameAndCode(req),
+	}, nil
+
+}
+
+// parseWorkdir builds a WorkdirCommand; WORKDIR takes exactly one argument.
+func parseWorkdir(req parseRequest) (*WorkdirCommand, error) {
+	if len(req.args) != 1 {
+		return nil, errExactlyOneArgument("WORKDIR")
+	}
+
+	err := req.flags.Parse()
+	if err != nil {
+		return nil, err
+	}
+	return &WorkdirCommand{
+		Path:            req.args[0],
+		withNameAndCode: newWithNameAndCode(req),
+	}, nil
+
+}
+
+// parseShellDependentCommand builds the shared cmdline part of RUN/CMD/ENTRYPOINT.
+// PrependShell is set when the instruction used shell (non-JSON) form; with
+// emptyAsNil an empty cmdline becomes nil instead of an empty slice.
+func parseShellDependentCommand(req parseRequest, emptyAsNil bool) ShellDependantCmdLine {
+	args := handleJSONArgs(req.args, req.attributes)
+	cmd := strslice.StrSlice(args)
+	if emptyAsNil && len(cmd) == 0 {
+		cmd = nil
+	}
+	return ShellDependantCmdLine{
+		CmdLine:      cmd,
+		PrependShell: !req.attributes["json"],
+	}
+}
+
+// parseRun builds a RunCommand; an empty cmdline is kept as an empty slice.
+func parseRun(req parseRequest) (*RunCommand, error) {
+
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+	return &RunCommand{
+		ShellDependantCmdLine: parseShellDependentCommand(req, false),
+		withNameAndCode:       newWithNameAndCode(req),
+	}, nil
+
+}
+
+// parseCmd builds a CmdCommand; an empty cmdline is kept as an empty slice.
+func parseCmd(req parseRequest) (*CmdCommand, error) {
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+	return &CmdCommand{
+		ShellDependantCmdLine: parseShellDependentCommand(req, false),
+		withNameAndCode:       newWithNameAndCode(req),
+	}, nil
+
+}
+
+// parseEntrypoint builds an EntrypointCommand; unlike RUN/CMD an empty
+// cmdline becomes nil (ENTRYPOINT [] resets the entrypoint).
+func parseEntrypoint(req parseRequest) (*EntrypointCommand, error) {
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+
+	cmd := &EntrypointCommand{
+		ShellDependantCmdLine: parseShellDependentCommand(req, true),
+		withNameAndCode:       newWithNameAndCode(req),
+	}
+
+	return cmd, nil
+}
+
+// parseOptInterval(flag) is the duration of flag.Value, or 0 if
+// empty. An error is reported if the value is given and less than minimum duration.
+func parseOptInterval(f *Flag) (time.Duration, error) {
+	s := f.Value
+	if s == "" {
+		return 0, nil
+	}
+	d, err := time.ParseDuration(s)
+	if err != nil {
+		return 0, err
+	}
+	if d < container.MinimumDuration {
+		return 0, fmt.Errorf("Interval %#v cannot be less than %s", f.name, container.MinimumDuration)
+	}
+	return d, nil
+}
+// parseHealthcheck builds a HealthCheckCommand from either
+// "HEALTHCHECK NONE" or "HEALTHCHECK [flags] CMD ...".
+func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) {
+	if len(req.args) == 0 {
+		return nil, errAtLeastOneArgument("HEALTHCHECK")
+	}
+	cmd := &HealthCheckCommand{
+		withNameAndCode: newWithNameAndCode(req),
+	}
+
+	typ := strings.ToUpper(req.args[0])
+	args := req.args[1:]
+	if typ == "NONE" {
+		// HEALTHCHECK NONE disables any inherited healthcheck; no flags or args allowed
+		if len(args) != 0 {
+			return nil, errors.New("HEALTHCHECK NONE takes no arguments")
+		}
+		test := strslice.StrSlice{typ}
+		cmd.Health = &container.HealthConfig{
+			Test: test,
+		}
+	} else {
+
+		healthcheck := container.HealthConfig{}
+
+		flInterval := req.flags.AddString("interval", "")
+		flTimeout := req.flags.AddString("timeout", "")
+		flStartPeriod := req.flags.AddString("start-period", "")
+		flRetries := req.flags.AddString("retries", "")
+
+		if err := req.flags.Parse(); err != nil {
+			return nil, err
+		}
+
+		switch typ {
+		case "CMD":
+			cmdSlice := handleJSONArgs(args, req.attributes)
+			if len(cmdSlice) == 0 {
+				return nil, errors.New("Missing command after HEALTHCHECK CMD")
+			}
+
+			// shell form is recorded as CMD-SHELL so the runtime knows to wrap it
+			if !req.attributes["json"] {
+				typ = "CMD-SHELL"
+			}
+
+			healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...))
+		default:
+			return nil, fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ)
+		}
+
+		interval, err := parseOptInterval(flInterval)
+		if err != nil {
+			return nil, err
+		}
+		healthcheck.Interval = interval
+
+		timeout, err := parseOptInterval(flTimeout)
+		if err != nil {
+			return nil, err
+		}
+		healthcheck.Timeout = timeout
+
+		startPeriod, err := parseOptInterval(flStartPeriod)
+		if err != nil {
+			return nil, err
+		}
+		healthcheck.StartPeriod = startPeriod
+
+		if flRetries.Value != "" {
+			retries, err := strconv.ParseInt(flRetries.Value, 10, 32)
+			if err != nil {
+				return nil, err
+			}
+			if retries < 1 {
+				return nil, fmt.Errorf("--retries must be at least 1 (not %d)", retries)
+			}
+			healthcheck.Retries = int(retries)
+		} else {
+			healthcheck.Retries = 0
+		}
+
+		cmd.Health = &healthcheck
+	}
+	return cmd, nil
+}
+
+// parseExpose builds an ExposeCommand with its port list sorted.
+func parseExpose(req parseRequest) (*ExposeCommand, error) {
+	portsTab := req.args
+
+	if len(req.args) == 0 {
+		return nil, errAtLeastOneArgument("EXPOSE")
+	}
+
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+
+	// portsTab aliases req.args, so this sorts req.args in place as well
+	sort.Strings(portsTab)
+	return &ExposeCommand{
+		Ports:           portsTab,
+		withNameAndCode: newWithNameAndCode(req),
+	}, nil
+}
+
+// parseUser builds a UserCommand; USER takes exactly one argument.
+func parseUser(req parseRequest) (*UserCommand, error) {
+	if len(req.args) != 1 {
+		return nil, errExactlyOneArgument("USER")
+	}
+
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+	return &UserCommand{
+		User:            req.args[0],
+		withNameAndCode: newWithNameAndCode(req),
+	}, nil
+}
+
+// parseVolume builds a VolumeCommand, trimming each path and rejecting blanks.
+func parseVolume(req parseRequest) (*VolumeCommand, error) {
+	if len(req.args) == 0 {
+		return nil, errAtLeastOneArgument("VOLUME")
+	}
+
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+
+	cmd := &VolumeCommand{
+		withNameAndCode: newWithNameAndCode(req),
+	}
+
+	for _, v := range req.args {
+		v = strings.TrimSpace(v)
+		if v == "" {
+			return nil, errors.New("VOLUME specified can not be an empty string")
+		}
+		cmd.Volumes = append(cmd.Volumes, v)
+	}
+	return cmd, nil
+
+}
+
+// parseStopSignal builds a StopSignalCommand; STOPSIGNAL takes exactly one argument.
+// NOTE(review): unlike the other parse* helpers this never calls
+// req.flags.Parse(), so flags on STOPSIGNAL are silently ignored — confirm intended.
+func parseStopSignal(req parseRequest) (*StopSignalCommand, error) {
+	if len(req.args) != 1 {
+		return nil, errExactlyOneArgument("STOPSIGNAL")
+	}
+	sig := req.args[0]
+
+	cmd := &StopSignalCommand{
+		Signal:          sig,
+		withNameAndCode: newWithNameAndCode(req),
+	}
+	return cmd, nil
+
+}
+
+// parseArg builds an ArgCommand from "ARG name" or "ARG name=value".
+func parseArg(req parseRequest) (*ArgCommand, error) {
+	if len(req.args) != 1 {
+		return nil, errExactlyOneArgument("ARG")
+	}
+
+	var (
+		name     string
+		newValue *string // stays nil when no default value is given
+	)
+
+	arg := req.args[0]
+	// 'arg' can just be a name or name-value pair. Note that this is different
+	// from 'env' that handles the split of name and value at the parser level.
+	// The reason for doing it differently for 'arg' is that we support just
+	// defining an arg and not assign it a value (while 'env' always expects a
+	// name-value pair). If possible, it will be good to harmonize the two.
+	if strings.Contains(arg, "=") {
+		parts := strings.SplitN(arg, "=", 2)
+		if len(parts[0]) == 0 {
+			return nil, errBlankCommandNames("ARG")
+		}
+
+		name = parts[0]
+		newValue = &parts[1]
+	} else {
+		name = arg
+	}
+
+	return &ArgCommand{
+		Key:             name,
+		Value:           newValue,
+		withNameAndCode: newWithNameAndCode(req),
+	}, nil
+}
+
+// parseShell builds a ShellCommand; SHELL only accepts the JSON-array form.
+func parseShell(req parseRequest) (*ShellCommand, error) {
+	if err := req.flags.Parse(); err != nil {
+		return nil, err
+	}
+	shellSlice := handleJSONArgs(req.args, req.attributes)
+	switch {
+	case len(shellSlice) == 0:
+		// SHELL []
+		return nil, errAtLeastOneArgument("SHELL")
+	case req.attributes["json"]:
+		// SHELL ["powershell", "-command"]
+
+		return &ShellCommand{
+			Shell:           strslice.StrSlice(shellSlice),
+			withNameAndCode: newWithNameAndCode(req),
+		}, nil
+	default:
+		// SHELL powershell -command - not JSON
+		return nil, errNotJSON("SHELL", req.original)
+	}
+}
+
+// errAtLeastOneArgument reports a missing-argument error for command.
+func errAtLeastOneArgument(command string) error {
+	return errors.Errorf("%s requires at least one argument", command)
+}
+
+// errExactlyOneArgument reports a wrong-arity error for single-argument commands.
+func errExactlyOneArgument(command string) error {
+	return errors.Errorf("%s requires exactly one argument", command)
+}
+
+// errNoDestinationArgument reports a missing destination for ADD/COPY.
+func errNoDestinationArgument(command string) error {
+	return errors.Errorf("%s requires at least two arguments, but only one was provided. Destination could not be determined.", command)
+}
+
+// errBlankCommandNames reports an empty key in a key/value command.
+func errBlankCommandNames(command string) error {
+	return errors.Errorf("%s names can not be blank", command)
+}
+
+// errTooManyArguments reports an argument-count mismatch for command.
+func errTooManyArguments(command string) error {
+	return errors.Errorf("Bad input to %s, too many arguments", command)
+}
diff --git a/builder/dockerfile/instructions/parse_test.go b/builder/dockerfile/instructions/parse_test.go
new file mode 100644
index 0000000..6ac3dfd
--- /dev/null
+++ b/builder/dockerfile/instructions/parse_test.go
@@ -0,0 +1,200 @@
+package instructions
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/builder/dockerfile/command"
+	"github.com/docker/docker/builder/dockerfile/parser"
+	"github.com/docker/docker/internal/testutil"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// TestCommandsExactlyOneArgument verifies the errExactlyOneArgument error for
+// commands invoked with no arguments.
+func TestCommandsExactlyOneArgument(t *testing.T) {
+	commands := []string{
+		"MAINTAINER",
+		"WORKDIR",
+		"USER",
+		"STOPSIGNAL",
+	}
+
+	for _, command := range commands {
+		ast, err := parser.Parse(strings.NewReader(command))
+		require.NoError(t, err)
+		_, err = ParseInstruction(ast.AST.Children[0])
+		assert.EqualError(t, err, errExactlyOneArgument(command).Error())
+	}
+}
+
+// TestCommandsAtLeastOneArgument verifies the errAtLeastOneArgument error for
+// commands invoked with no arguments.
+func TestCommandsAtLeastOneArgument(t *testing.T) {
+	commands := []string{
+		"ENV",
+		"LABEL",
+		"ONBUILD",
+		"HEALTHCHECK",
+		"EXPOSE",
+		"VOLUME",
+	}
+
+	for _, command := range commands {
+		ast, err := parser.Parse(strings.NewReader(command))
+		require.NoError(t, err)
+		_, err = ParseInstruction(ast.AST.Children[0])
+		assert.EqualError(t, err, errAtLeastOneArgument(command).Error())
+	}
+}
+
+// TestCommandsNoDestinationArgument verifies ADD/COPY reject a single argument.
+func TestCommandsNoDestinationArgument(t *testing.T) {
+	commands := []string{
+		"ADD",
+		"COPY",
+	}
+
+	for _, command := range commands {
+		ast, err := parser.Parse(strings.NewReader(command + " arg1"))
+		require.NoError(t, err)
+		_, err = ParseInstruction(ast.AST.Children[0])
+		assert.EqualError(t, err, errNoDestinationArgument(command).Error())
+	}
+}
+
+// TestCommandsTooManyArguments verifies ENV/LABEL reject an odd argument count
+// using a hand-built AST node (the parser itself would pair the arguments).
+func TestCommandsTooManyArguments(t *testing.T) {
+	commands := []string{
+		"ENV",
+		"LABEL",
+	}
+
+	for _, command := range commands {
+		node := &parser.Node{
+			// NOTE(review): missing space after the command keyword; harmless
+			// here since Original is not inspected by this assertion.
+			Original: command + "arg1 arg2 arg3",
+			Value:    strings.ToLower(command),
+			Next: &parser.Node{
+				Value: "arg1",
+				Next: &parser.Node{
+					Value: "arg2",
+					Next: &parser.Node{
+						Value: "arg3",
+					},
+				},
+			},
+		}
+		_, err := ParseInstruction(node)
+		assert.EqualError(t, err, errTooManyArguments(command).Error())
+	}
+}
+
+// TestCommandsBlankNames verifies ENV/LABEL reject an empty key.
+func TestCommandsBlankNames(t *testing.T) {
+	commands := []string{
+		"ENV",
+		"LABEL",
+	}
+
+	for _, command := range commands {
+		node := &parser.Node{
+			Original: command + " =arg2",
+			Value:    strings.ToLower(command),
+			Next: &parser.Node{
+				Value: "",
+				Next: &parser.Node{
+					Value: "arg2",
+				},
+			},
+		}
+		_, err := ParseInstruction(node)
+		assert.EqualError(t, err, errBlankCommandNames(command).Error())
+	}
+}
+
+// TestHealthCheckCmd verifies that a shell-form HEALTHCHECK CMD is recorded
+// as CMD-SHELL with the arguments joined into one string.
+func TestHealthCheckCmd(t *testing.T) {
+	node := &parser.Node{
+		Value: command.Healthcheck,
+		Next: &parser.Node{
+			Value: "CMD",
+			Next: &parser.Node{
+				Value: "hello",
+				Next: &parser.Node{
+					Value: "world",
+				},
+			},
+		},
+	}
+	cmd, err := ParseInstruction(node)
+	assert.NoError(t, err)
+	hc, ok := cmd.(*HealthCheckCommand)
+	assert.True(t, ok)
+	expected := []string{"CMD-SHELL", "hello world"}
+	assert.Equal(t, expected, hc.Health.Test)
+}
+
+// TestParseOptInterval verifies the minimum-duration check for interval flags.
+func TestParseOptInterval(t *testing.T) {
+	flInterval := &Flag{
+		name:     "interval",
+		flagType: stringType,
+		Value:    "50ns",
+	}
+	_, err := parseOptInterval(flInterval)
+	testutil.ErrorContains(t, err, "cannot be less than 1ms")
+
+	flInterval.Value = "1ms"
+	_, err = parseOptInterval(flInterval)
+	require.NoError(t, err)
+}
+
+// TestErrorCases table-tests Dockerfiles that must fail instruction parsing,
+// matching each expected error substring.
+func TestErrorCases(t *testing.T) {
+	cases := []struct {
+		name          string
+		dockerfile    string
+		expectedError string
+	}{
+		{
+			name: "copyEmptyWhitespace",
+			dockerfile: `COPY	
+		quux \
+      bar`,
+			expectedError: "COPY requires at least two arguments",
+		},
+		{
+			name:          "ONBUILD forbidden FROM",
+			dockerfile:    "ONBUILD FROM scratch",
+			expectedError: "FROM isn't allowed as an ONBUILD trigger",
+		},
+		{
+			name:          "ONBUILD forbidden MAINTAINER",
+			dockerfile:    "ONBUILD MAINTAINER docker.io",
+			expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger",
+		},
+		{
+			name:          "ARG two arguments",
+			dockerfile:    "ARG foo bar",
+			expectedError: "ARG requires exactly one argument",
+		},
+		{
+			name:          "MAINTAINER unknown flag",
+			dockerfile:    "MAINTAINER --boo joe@example.com",
+			expectedError: "Unknown flag: boo",
+		},
+		{
+			name:          "Chaining ONBUILD",
+			dockerfile:    `ONBUILD ONBUILD RUN touch foobar`,
+			expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed",
+		},
+		{
+			name:          "Invalid instruction",
+			dockerfile:    `foo bar`,
+			expectedError: "unknown instruction: FOO",
+		},
+	}
+	for _, c := range cases {
+		r := strings.NewReader(c.dockerfile)
+		ast, err := parser.Parse(r)
+
+		if err != nil {
+			t.Fatalf("Error when parsing Dockerfile: %s", err)
+		}
+		n := ast.AST.Children[0]
+		_, err = ParseInstruction(n)
+		testutil.ErrorContains(t, err, c.expectedError)
+	}
+
+}
diff --git a/builder/dockerfile/instructions/support.go b/builder/dockerfile/instructions/support.go
new file mode 100644
index 0000000..beefe77
--- /dev/null
+++ b/builder/dockerfile/instructions/support.go
@@ -0,0 +1,19 @@
+package instructions
+
+import "strings"
+
+// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile
+// for exec form it returns untouched args slice
+// for shell form it returns concatenated args as the first element of a slice
+func handleJSONArgs(args []string, attributes map[string]bool) []string {
+	if len(args) == 0 {
+		return []string{}
+	}
+
+	if attributes != nil && attributes["json"] {
+		return args
+	}
+
+	// literal string command, not an exec array
+	return []string{strings.Join(args, " ")}
+}
diff --git a/builder/dockerfile/instructions/support_test.go b/builder/dockerfile/instructions/support_test.go
new file mode 100644
index 0000000..2b888dc
--- /dev/null
+++ b/builder/dockerfile/instructions/support_test.go
@@ -0,0 +1,65 @@
+package instructions
+
+import "testing"
+
+type testCase struct {
+	name       string
+	args       []string
+	attributes map[string]bool
+	expected   []string
+}
+
+func initTestCases() []testCase {
+	testCases := []testCase{}
+
+	testCases = append(testCases, testCase{
+		name:       "empty args",
+		args:       []string{},
+		attributes: make(map[string]bool),
+		expected:   []string{},
+	})
+
+	jsonAttributes := make(map[string]bool)
+	jsonAttributes["json"] = true
+
+	testCases = append(testCases, testCase{
+		name:       "json attribute with one element",
+		args:       []string{"foo"},
+		attributes: jsonAttributes,
+		expected:   []string{"foo"},
+	})
+
+	testCases = append(testCases, testCase{
+		name:       "json attribute with two elements",
+		args:       []string{"foo", "bar"},
+		attributes: jsonAttributes,
+		expected:   []string{"foo", "bar"},
+	})
+
+	testCases = append(testCases, testCase{
+		name:       "no attributes",
+		args:       []string{"foo", "bar"},
+		attributes: nil,
+		expected:   []string{"foo bar"},
+	})
+
+	return testCases
+}
+
+func TestHandleJSONArgs(t *testing.T) {
+	testCases := initTestCases()
+
+	for _, test := range testCases {
+		arguments := handleJSONArgs(test.args, test.attributes)
+
+		if len(arguments) != len(test.expected) {
+			t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments))
+		}
+
+		for i := range test.expected {
+			if arguments[i] != test.expected[i] {
+				t.Fatalf("In test \"%s\": element at position %d is incorrect. Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i])
+			}
+		}
+	}
+}
diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go
index b18118c..0e08ec2 100644
--- a/builder/dockerfile/internals.go
+++ b/builder/dockerfile/internals.go
@@ -7,6 +7,9 @@
 	"crypto/sha256"
 	"encoding/hex"
 	"fmt"
+	"io"
+	"os"
+	"path"
 	"path/filepath"
 	"strconv"
 	"strings"
@@ -15,13 +18,63 @@
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/system"
 	lcUser "github.com/opencontainers/runc/libcontainer/user"
 	"github.com/pkg/errors"
 )
 
+// Archiver defines an interface for copying files from one destination to
+// another using Tar/Untar.
+type Archiver interface {
+	TarUntar(src, dst string) error
+	UntarPath(src, dst string) error
+	CopyWithTar(src, dst string) error
+	CopyFileWithTar(src, dst string) error
+	IDMappings() *idtools.IDMappings
+}
+
+// The builder will use the following interfaces if the container fs implements
+// these for optimized copies to and from the container.
+type extractor interface {
+	ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error
+}
+
+type archiver interface {
+	ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error)
+}
+
+// helper functions to get tar/untar func
+func untarFunc(i interface{}) containerfs.UntarFunc {
+	if ea, ok := i.(extractor); ok {
+		return ea.ExtractArchive
+	}
+	return chrootarchive.Untar
+}
+
+func tarFunc(i interface{}) containerfs.TarFunc {
+	if ap, ok := i.(archiver); ok {
+		return ap.ArchivePath
+	}
+	return archive.TarWithOptions
+}
+
+func (b *Builder) getArchiver(src, dst containerfs.Driver) Archiver {
+	t, u := tarFunc(src), untarFunc(dst)
+	return &containerfs.Archiver{
+		SrcDriver:     src,
+		DstDriver:     dst,
+		Tar:           t,
+		Untar:         u,
+		IDMappingsVar: b.idMappings,
+	}
+}
+
 func (b *Builder) commit(dispatchState *dispatchState, comment string) error {
 	if b.disableCommit {
 		return nil
@@ -30,7 +83,8 @@
 		return errors.New("Please provide a source image with `from` prior to commit")
 	}
 
-	runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, b.platform))
+	optionsPlatform := system.ParsePlatform(b.options.Platform)
+	runConfigWithCommentCmd := copyRunConfig(dispatchState.runConfig, withCmdComment(comment, optionsPlatform.OS))
 	hit, err := b.probeCache(dispatchState, runConfigWithCommentCmd)
 	if err != nil || hit {
 		return err
@@ -65,12 +119,12 @@
 	}
 
 	dispatchState.imageID = imageID
-	b.buildStages.update(imageID)
 	return nil
 }
 
 func (b *Builder) exportImage(state *dispatchState, imageMount *imageMount, runConfig *container.Config) error {
-	newLayer, err := imageMount.Layer().Commit(b.platform)
+	optionsPlatform := system.ParsePlatform(b.options.Platform)
+	newLayer, err := imageMount.Layer().Commit(optionsPlatform.OS)
 	if err != nil {
 		return err
 	}
@@ -105,7 +159,6 @@
 
 	state.imageID = exportedImage.ImageID()
 	b.imageSources.Add(newImageMount(exportedImage, newLayer))
-	b.buildStages.update(state.imageID)
 	return nil
 }
 
@@ -119,9 +172,10 @@
 	commentStr := fmt.Sprintf("%s %s%s in %s ", inst.cmdName, chownComment, srcHash, inst.dest)
 
 	// TODO: should this have been using origPaths instead of srcHash in the comment?
+	optionsPlatform := system.ParsePlatform(b.options.Platform)
 	runConfigWithCommentCmd := copyRunConfig(
 		state.runConfig,
-		withCmdCommentString(commentStr, b.platform))
+		withCmdCommentString(commentStr, optionsPlatform.OS))
 	hit, err := b.probeCache(state, runConfigWithCommentCmd)
 	if err != nil || hit {
 		return err
@@ -131,28 +185,29 @@
 	if err != nil {
 		return errors.Wrapf(err, "failed to get destination image %q", state.imageID)
 	}
-	destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount)
+
+	destInfo, err := createDestInfo(state.runConfig.WorkingDir, inst, imageMount, b.options.Platform)
 	if err != nil {
 		return err
 	}
 
-	chownPair := b.archiver.IDMappings.RootPair()
+	chownPair := b.idMappings.RootPair()
 	// if a chown was requested, perform the steps to get the uid, gid
 	// translated (if necessary because of user namespaces), and replace
 	// the root pair with the chown pair for copy operations
 	if inst.chownStr != "" {
-		chownPair, err = parseChownFlag(inst.chownStr, destInfo.root, b.archiver.IDMappings)
+		chownPair, err = parseChownFlag(inst.chownStr, destInfo.root.Path(), b.idMappings)
 		if err != nil {
 			return errors.Wrapf(err, "unable to convert uid/gid chown string to host mapping")
 		}
 	}
 
-	opts := copyFileOptions{
-		decompress: inst.allowLocalDecompression,
-		archiver:   b.archiver,
-		chownPair:  chownPair,
-	}
 	for _, info := range inst.infos {
+		opts := copyFileOptions{
+			decompress: inst.allowLocalDecompression,
+			archiver:   b.getArchiver(info.root, destInfo.root),
+			chownPair:  chownPair,
+		}
 		if err := performCopyForInfo(destInfo, info, opts); err != nil {
 			return errors.Wrapf(err, "failed to copy files")
 		}
@@ -206,10 +261,7 @@
 		return uid, nil
 	}
 	users, err := lcUser.ParsePasswdFileFilter(filepath, func(u lcUser.User) bool {
-		if u.Name == userStr {
-			return true
-		}
-		return false
+		return u.Name == userStr
 	})
 	if err != nil {
 		return 0, err
@@ -228,10 +280,7 @@
 		return gid, nil
 	}
 	groups, err := lcUser.ParseGroupFileFilter(filepath, func(g lcUser.Group) bool {
-		if g.Name == groupStr {
-			return true
-		}
-		return false
+		return g.Name == groupStr
 	})
 	if err != nil {
 		return 0, err
@@ -242,10 +291,10 @@
 	return groups[0].Gid, nil
 }
 
-func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount) (copyInfo, error) {
+func createDestInfo(workingDir string, inst copyInstruction, imageMount *imageMount, platform string) (copyInfo, error) {
 	// Twiddle the destination when it's a relative path - meaning, make it
 	// relative to the WORKINGDIR
-	dest, err := normalizeDest(workingDir, inst.dest)
+	dest, err := normalizeDest(workingDir, inst.dest, platform)
 	if err != nil {
 		return copyInfo{}, errors.Wrapf(err, "invalid %s", inst.cmdName)
 	}
@@ -258,6 +307,63 @@
 	return newCopyInfoFromSource(destMount, dest, ""), nil
 }
 
+// normalizeDest normalizes the destination of a COPY/ADD command in a
+// platform semantically consistent way.
+func normalizeDest(workingDir, requested string, platform string) (string, error) {
+	dest := fromSlash(requested, platform)
+	endsInSlash := strings.HasSuffix(dest, string(separator(platform)))
+
+	if platform != "windows" {
+		if !path.IsAbs(requested) {
+			dest = path.Join("/", filepath.ToSlash(workingDir), dest)
+			// Make sure we preserve any trailing slash
+			if endsInSlash {
+				dest += "/"
+			}
+		}
+		return dest, nil
+	}
+
+	// We are guaranteed that the working directory is already consistent.
+	// However, Windows also has, for now, the limitation that ADD/COPY can
+	// only be done to the system drive, not any drives that might be present
+	// as a result of a bind mount.
+	//
+	// So... if the path requested is Linux-style absolute (/foo or \\foo),
+	// we assume it is the system drive. If it is a Windows-style absolute
+	// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
+	// strip any configured working directories drive letter so that it
+	// can be subsequently legitimately converted to a Windows volume-style
+	// pathname.
+
+	// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
+	// we only want to validate where the DriveColon part has been supplied.
+	if filepath.IsAbs(dest) {
+		if strings.ToUpper(string(dest[0])) != "C" {
+			return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)")
+		}
+		dest = dest[2:] // Strip the drive letter
+	}
+
+	// Cannot handle relative where WorkingDir is not the system drive.
+	if len(workingDir) > 0 {
+		if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) {
+			return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir)
+		}
+		if !system.IsAbs(dest) {
+			if string(workingDir[0]) != "C" {
+				return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive")
+			}
+			dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest)
+			// Make sure we preserve any trailing slash
+			if endsInSlash {
+				dest += string(os.PathSeparator)
+			}
+		}
+	}
+	return dest, nil
+}
+
 // For backwards compat, if there's just one info then use it as the
 // cache look-up string, otherwise hash 'em all into one
 func getSourceHashFromInfos(infos []copyInfo) string {
@@ -334,9 +440,9 @@
 
 // getShell is a helper function which gets the right shell for prefixing the
 // shell-form of RUN, ENTRYPOINT and CMD instructions
-func getShell(c *container.Config, platform string) []string {
+func getShell(c *container.Config, os string) []string {
 	if 0 == len(c.Shell) {
-		return append([]string{}, defaultShellForPlatform(platform)[:]...)
+		return append([]string{}, defaultShellForOS(os)[:]...)
 	}
 	return append([]string{}, c.Shell[:]...)
 }
@@ -349,7 +455,6 @@
 	fmt.Fprint(b.Stdout, " ---> Using cache\n")
 
 	dispatchState.imageID = cachedID
-	b.buildStages.update(dispatchState.imageID)
 	return true, nil
 }
 
@@ -361,13 +466,15 @@
 	}
 	// Set a log config to override any default value set on the daemon
 	hostConfig := &container.HostConfig{LogConfig: defaultLogConfig}
-	container, err := b.containerManager.Create(runConfig, hostConfig, b.platform)
+	optionsPlatform := system.ParsePlatform(b.options.Platform)
+	container, err := b.containerManager.Create(runConfig, hostConfig, optionsPlatform.OS)
 	return container.ID, err
 }
 
 func (b *Builder) create(runConfig *container.Config) (string, error) {
 	hostConfig := hostConfigFromOptions(b.options)
-	container, err := b.containerManager.Create(runConfig, hostConfig, b.platform)
+	optionsPlatform := system.ParsePlatform(b.options.Platform)
+	container, err := b.containerManager.Create(runConfig, hostConfig, optionsPlatform.OS)
 	if err != nil {
 		return "", err
 	}
@@ -403,3 +510,19 @@
 		ExtraHosts: options.ExtraHosts,
 	}
 }
+
+// fromSlash works like filepath.FromSlash but with a given OS platform field
+func fromSlash(path, platform string) string {
+	if platform == "windows" {
+		return strings.Replace(path, "/", "\\", -1)
+	}
+	return path
+}
+
+// separator returns an OS path separator for the given OS platform
+func separator(platform string) byte {
+	if platform == "windows" {
+		return '\\'
+	}
+	return '/'
+}
diff --git a/builder/dockerfile/internals_test.go b/builder/dockerfile/internals_test.go
index ed7b4cd..380d861 100644
--- a/builder/dockerfile/internals_test.go
+++ b/builder/dockerfile/internals_test.go
@@ -103,7 +103,7 @@
 			doc:       "Set the command to a comment",
 			modifiers: []runConfigModifier{withCmdComment("comment", runtime.GOOS)},
 			expected: &container.Config{
-				Cmd: append(defaultShellForPlatform(runtime.GOOS), "#(nop) ", "comment"),
+				Cmd: append(defaultShellForOS(runtime.GOOS), "#(nop) ", "comment"),
 				Env: defaultEnv,
 			},
 		},
diff --git a/builder/dockerfile/internals_unix.go b/builder/dockerfile/internals_unix.go
deleted file mode 100644
index 533735c..0000000
--- a/builder/dockerfile/internals_unix.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// +build !windows
-
-package dockerfile
-
-import (
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/docker/docker/pkg/system"
-)
-
-// normalizeDest normalizes the destination of a COPY/ADD command in a
-// platform semantically consistent way.
-func normalizeDest(workingDir, requested string) (string, error) {
-	dest := filepath.FromSlash(requested)
-	endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator))
-	if !system.IsAbs(requested) {
-		dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest)
-		// Make sure we preserve any trailing slash
-		if endsInSlash {
-			dest += string(os.PathSeparator)
-		}
-	}
-	return dest, nil
-}
-
-func containsWildcards(name string) bool {
-	for i := 0; i < len(name); i++ {
-		ch := name[i]
-		if ch == '\\' {
-			i++
-		} else if ch == '*' || ch == '?' || ch == '[' {
-			return true
-		}
-	}
-	return false
-}
-
-func validateCopySourcePath(imageSource *imageMount, origPath string) error {
-	return nil
-}
diff --git a/builder/dockerfile/internals_windows.go b/builder/dockerfile/internals_windows.go
deleted file mode 100644
index 57f8329..0000000
--- a/builder/dockerfile/internals_windows.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package dockerfile
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/docker/docker/pkg/system"
-	"github.com/pkg/errors"
-)
-
-// normalizeDest normalizes the destination of a COPY/ADD command in a
-// platform semantically consistent way.
-func normalizeDest(workingDir, requested string) (string, error) {
-	dest := filepath.FromSlash(requested)
-	endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator))
-
-	// We are guaranteed that the working directory is already consistent,
-	// However, Windows also has, for now, the limitation that ADD/COPY can
-	// only be done to the system drive, not any drives that might be present
-	// as a result of a bind mount.
-	//
-	// So... if the path requested is Linux-style absolute (/foo or \\foo),
-	// we assume it is the system drive. If it is a Windows-style absolute
-	// (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we
-	// strip any configured working directories drive letter so that it
-	// can be subsequently legitimately converted to a Windows volume-style
-	// pathname.
-
-	// Not a typo - filepath.IsAbs, not system.IsAbs on this next check as
-	// we only want to validate where the DriveColon part has been supplied.
-	if filepath.IsAbs(dest) {
-		if strings.ToUpper(string(dest[0])) != "C" {
-			return "", fmt.Errorf("Windows does not support destinations not on the system drive (C:)")
-		}
-		dest = dest[2:] // Strip the drive letter
-	}
-
-	// Cannot handle relative where WorkingDir is not the system drive.
-	if len(workingDir) > 0 {
-		if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) {
-			return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir)
-		}
-		if !system.IsAbs(dest) {
-			if string(workingDir[0]) != "C" {
-				return "", fmt.Errorf("Windows does not support relative paths when WORKDIR is not the system drive")
-			}
-			dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest)
-			// Make sure we preserve any trailing slash
-			if endsInSlash {
-				dest += string(os.PathSeparator)
-			}
-		}
-	}
-	return dest, nil
-}
-
-func containsWildcards(name string) bool {
-	for i := 0; i < len(name); i++ {
-		ch := name[i]
-		if ch == '*' || ch == '?' || ch == '[' {
-			return true
-		}
-	}
-	return false
-}
-
-var pathBlacklist = map[string]bool{
-	"c:\\":        true,
-	"c:\\windows": true,
-}
-
-func validateCopySourcePath(imageSource *imageMount, origPath string) error {
-	// validate windows paths from other images
-	if imageSource == nil {
-		return nil
-	}
-	origPath = filepath.FromSlash(origPath)
-	p := strings.ToLower(filepath.Clean(origPath))
-	if !filepath.IsAbs(p) {
-		if filepath.VolumeName(p) != "" {
-			if p[len(p)-2:] == ":." { // case where clean returns weird c:. paths
-				p = p[:len(p)-1]
-			}
-			p += "\\"
-		} else {
-			p = filepath.Join("c:\\", p)
-		}
-	}
-	if _, blacklisted := pathBlacklist[p]; blacklisted {
-		return errors.New("copy from c:\\ or c:\\windows is not allowed on windows")
-	}
-	return nil
-}
diff --git a/builder/dockerfile/internals_windows_test.go b/builder/dockerfile/internals_windows_test.go
index ca6920c..6ecc37b 100644
--- a/builder/dockerfile/internals_windows_test.go
+++ b/builder/dockerfile/internals_windows_test.go
@@ -40,7 +40,7 @@
 	}
 	for _, testcase := range tests {
 		msg := fmt.Sprintf("Input: %s, %s", testcase.current, testcase.requested)
-		actual, err := normalizeDest(testcase.current, testcase.requested)
+		actual, err := normalizeDest(testcase.current, testcase.requested, "windows")
 		if testcase.etext == "" {
 			if !assert.NoError(t, err, msg) {
 				continue
diff --git a/builder/dockerfile/mockbackend_test.go b/builder/dockerfile/mockbackend_test.go
index adc2276..986861c 100644
--- a/builder/dockerfile/mockbackend_test.go
+++ b/builder/dockerfile/mockbackend_test.go
@@ -3,6 +3,7 @@
 import (
 	"encoding/json"
 	"io"
+	"runtime"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/backend"
@@ -10,6 +11,7 @@
 	"github.com/docker/docker/builder"
 	containerpkg "github.com/docker/docker/container"
 	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/containerfs"
 	"golang.org/x/net/context"
 )
 
@@ -95,6 +97,10 @@
 	return i.config
 }
 
+func (i *mockImage) OperatingSystem() string {
+	return runtime.GOOS
+}
+
 func (i *mockImage) MarshalJSON() ([]byte, error) {
 	type rawImage mockImage
 	return json.Marshal(rawImage(*i))
@@ -117,8 +123,8 @@
 	return nil
 }
 
-func (l *mockLayer) Mount() (string, error) {
-	return "mountPath", nil
+func (l *mockLayer) Mount() (containerfs.ContainerFS, error) {
+	return containerfs.NewLocalContainerFS("mountPath"), nil
 }
 
 func (l *mockLayer) Commit(string) (builder.ReleaseableLayer, error) {
diff --git a/builder/dockerfile/parser/parser.go b/builder/dockerfile/parser/parser.go
index 7f07ff2..822c42b 100644
--- a/builder/dockerfile/parser/parser.go
+++ b/builder/dockerfile/parser/parser.go
@@ -91,9 +91,6 @@
 // DefaultEscapeToken is the default escape token
 const DefaultEscapeToken = '\\'
 
-// defaultPlatformToken is the platform assumed for the build if not explicitly provided
-var defaultPlatformToken = runtime.GOOS
-
 // Directive is the structure used during a build run to hold the state of
 // parsing directives.
 type Directive struct {
@@ -143,7 +140,7 @@
 	if len(tecMatch) != 0 {
 		for i, n := range tokenEscapeCommand.SubexpNames() {
 			if n == "escapechar" {
-				if d.escapeSeen == true {
+				if d.escapeSeen {
 					return errors.New("only one escape parser directive can be used")
 				}
 				d.escapeSeen = true
@@ -152,14 +149,13 @@
 		}
 	}
 
-	// TODO @jhowardmsft LCOW Support: Eventually this check can be removed,
-	// but only recognise a platform token if running in LCOW mode.
+	// Only recognise a platform token if LCOW is supported
 	if system.LCOWSupported() {
 		tpcMatch := tokenPlatformCommand.FindStringSubmatch(strings.ToLower(line))
 		if len(tpcMatch) != 0 {
 			for i, n := range tokenPlatformCommand.SubexpNames() {
 				if n == "platform" {
-					if d.platformSeen == true {
+					if d.platformSeen {
 						return errors.New("only one platform parser directive can be used")
 					}
 					d.platformSeen = true
@@ -177,7 +173,6 @@
 func NewDefaultDirective() *Directive {
 	directive := Directive{}
 	directive.setEscapeToken(string(DefaultEscapeToken))
-	directive.setPlatformToken(defaultPlatformToken)
 	return &directive
 }
 
@@ -242,8 +237,10 @@
 type Result struct {
 	AST         *Node
 	EscapeToken rune
-	Platform    string
-	Warnings    []string
+	// TODO @jhowardmsft - see https://github.com/moby/moby/issues/34617
+	// This next field will be removed in a future update for LCOW support.
+	OS       string
+	Warnings []string
 }
 
 // PrintWarnings to the writer
@@ -290,6 +287,10 @@
 			}
 			currentLine++
 
+			if isComment(scanner.Bytes()) {
+				// original line was a comment (processLine strips comments)
+				continue
+			}
 			if isEmptyContinuationLine(bytesRead) {
 				hasEmptyContinuationLine = true
 				continue
@@ -319,7 +320,7 @@
 		AST:         root,
 		Warnings:    warnings,
 		EscapeToken: d.escapeToken,
-		Platform:    d.platformToken,
+		OS:          d.platformToken,
 	}, nil
 }
 
@@ -331,8 +332,12 @@
 	return bytes.TrimLeftFunc(src, unicode.IsSpace)
 }
 
+func isComment(line []byte) bool {
+	return tokenComment.Match(trimWhitespace(line))
+}
+
 func isEmptyContinuationLine(line []byte) bool {
-	return len(trimComments(trimWhitespace(line))) == 0
+	return len(trimWhitespace(line)) == 0
 }
 
 var utf8bom = []byte{0xEF, 0xBB, 0xBF}
diff --git a/builder/dockerfile/parser/parser_test.go b/builder/dockerfile/parser/parser_test.go
index bb057ec..5f354bb 100644
--- a/builder/dockerfile/parser/parser_test.go
+++ b/builder/dockerfile/parser/parser_test.go
@@ -141,6 +141,13 @@
 RUN another \
 
     thing
+RUN non-indented \
+# this is a comment
+   after-comment
+
+RUN indented \
+    # this is an indented comment
+    comment
 	`)
 
 	result, err := Parse(dockerfile)
diff --git a/builder/dockerfile/support.go b/builder/dockerfile/support.go
deleted file mode 100644
index e875889..0000000
--- a/builder/dockerfile/support.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package dockerfile
-
-import "strings"
-
-// handleJSONArgs parses command passed to CMD, ENTRYPOINT, RUN and SHELL instruction in Dockerfile
-// for exec form it returns untouched args slice
-// for shell form it returns concatenated args as the first element of a slice
-func handleJSONArgs(args []string, attributes map[string]bool) []string {
-	if len(args) == 0 {
-		return []string{}
-	}
-
-	if attributes != nil && attributes["json"] {
-		return args
-	}
-
-	// literal string command, not an exec array
-	return []string{strings.Join(args, " ")}
-}
diff --git a/builder/dockerfile/support_test.go b/builder/dockerfile/support_test.go
deleted file mode 100644
index 7cc6fe9..0000000
--- a/builder/dockerfile/support_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package dockerfile
-
-import "testing"
-
-type testCase struct {
-	name       string
-	args       []string
-	attributes map[string]bool
-	expected   []string
-}
-
-func initTestCases() []testCase {
-	testCases := []testCase{}
-
-	testCases = append(testCases, testCase{
-		name:       "empty args",
-		args:       []string{},
-		attributes: make(map[string]bool),
-		expected:   []string{},
-	})
-
-	jsonAttributes := make(map[string]bool)
-	jsonAttributes["json"] = true
-
-	testCases = append(testCases, testCase{
-		name:       "json attribute with one element",
-		args:       []string{"foo"},
-		attributes: jsonAttributes,
-		expected:   []string{"foo"},
-	})
-
-	testCases = append(testCases, testCase{
-		name:       "json attribute with two elements",
-		args:       []string{"foo", "bar"},
-		attributes: jsonAttributes,
-		expected:   []string{"foo", "bar"},
-	})
-
-	testCases = append(testCases, testCase{
-		name:       "no attributes",
-		args:       []string{"foo", "bar"},
-		attributes: nil,
-		expected:   []string{"foo bar"},
-	})
-
-	return testCases
-}
-
-func TestHandleJSONArgs(t *testing.T) {
-	testCases := initTestCases()
-
-	for _, test := range testCases {
-		arguments := handleJSONArgs(test.args, test.attributes)
-
-		if len(arguments) != len(test.expected) {
-			t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments))
-		}
-
-		for i := range test.expected {
-			if arguments[i] != test.expected[i] {
-				t.Fatalf("In test \"%s\": element as position %d is incorrect. Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i])
-			}
-		}
-	}
-}
diff --git a/builder/fscache/fscache.go b/builder/fscache/fscache.go
index faef7d1..7cb4c5c 100644
--- a/builder/fscache/fscache.go
+++ b/builder/fscache/fscache.go
@@ -1,7 +1,10 @@
 package fscache
 
 import (
+	"archive/tar"
+	"crypto/sha256"
 	"encoding/json"
+	"hash"
 	"os"
 	"path/filepath"
 	"sort"
@@ -11,8 +14,10 @@
 	"github.com/boltdb/bolt"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builder/remotecontext"
+	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/directory"
 	"github.com/docker/docker/pkg/stringid"
+	"github.com/docker/docker/pkg/tarsum"
 	"github.com/moby/buildkit/session/filesync"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -212,7 +217,6 @@
 }
 
 type fsCacheStore struct {
-	root     string
 	mu       sync.Mutex
 	sources  map[string]*cachedSource
 	db       *bolt.DB
@@ -578,6 +582,10 @@
 	dc.supported = v
 }
 
+func (dc *detectChanges) ContentHasher() fsutil.ContentHasher {
+	return newTarsumHash
+}
+
 type wrappedContext struct {
 	builder.Source
 	closer func() error
@@ -607,3 +615,40 @@
 func (s sortableCacheSources) Swap(i, j int) {
 	s[i], s[j] = s[j], s[i]
 }
+
+func newTarsumHash(stat *fsutil.Stat) (hash.Hash, error) {
+	fi := &fsutil.StatInfo{stat}
+	p := stat.Path
+	if fi.IsDir() {
+		p += string(os.PathSeparator)
+	}
+	h, err := archive.FileInfoHeader(p, fi, stat.Linkname)
+	if err != nil {
+		return nil, err
+	}
+	h.Name = p
+	h.Uid = int(stat.Uid)
+	h.Gid = int(stat.Gid)
+	h.Linkname = stat.Linkname
+	if stat.Xattrs != nil {
+		h.Xattrs = make(map[string]string)
+		for k, v := range stat.Xattrs {
+			h.Xattrs[k] = string(v)
+		}
+	}
+
+	tsh := &tarsumHash{h: h, Hash: sha256.New()}
+	tsh.Reset()
+	return tsh, nil
+}
+
+// Reset resets the Hash to its initial state.
+func (tsh *tarsumHash) Reset() {
+	tsh.Hash.Reset()
+	tarsum.WriteV1Header(tsh.h, tsh.Hash)
+}
+
+type tarsumHash struct {
+	hash.Hash
+	h *tar.Header
+}
diff --git a/builder/fscache/fscache_test.go b/builder/fscache/fscache_test.go
index 3f6a1b0..c327ec7 100644
--- a/builder/fscache/fscache_test.go
+++ b/builder/fscache/fscache_test.go
@@ -36,25 +36,25 @@
 	src1, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data", "bar"})
 	assert.Nil(t, err)
 
-	dt, err := ioutil.ReadFile(filepath.Join(src1.Root(), "foo"))
+	dt, err := ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo"))
 	assert.Nil(t, err)
 	assert.Equal(t, string(dt), "data")
 
 	// same id doesn't recalculate anything
 	src2, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo", "data2", "bar"})
 	assert.Nil(t, err)
-	assert.Equal(t, src1.Root(), src2.Root())
+	assert.Equal(t, src1.Root().Path(), src2.Root().Path())
 
-	dt, err = ioutil.ReadFile(filepath.Join(src1.Root(), "foo"))
+	dt, err = ioutil.ReadFile(filepath.Join(src1.Root().Path(), "foo"))
 	assert.Nil(t, err)
 	assert.Equal(t, string(dt), "data")
 	assert.Nil(t, src2.Close())
 
 	src3, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo2", "data2", "bar"})
 	assert.Nil(t, err)
-	assert.NotEqual(t, src1.Root(), src3.Root())
+	assert.NotEqual(t, src1.Root().Path(), src3.Root().Path())
 
-	dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo2"))
+	dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo2"))
 	assert.Nil(t, err)
 	assert.Equal(t, string(dt), "data2")
 
@@ -71,12 +71,12 @@
 	// new upload with the same shared key shoutl overwrite
 	src4, err := fscache.SyncFrom(context.TODO(), &testIdentifier{"foo3", "data3", "bar"})
 	assert.Nil(t, err)
-	assert.NotEqual(t, src1.Root(), src3.Root())
+	assert.NotEqual(t, src1.Root().Path(), src3.Root().Path())
 
-	dt, err = ioutil.ReadFile(filepath.Join(src3.Root(), "foo3"))
+	dt, err = ioutil.ReadFile(filepath.Join(src3.Root().Path(), "foo3"))
 	assert.Nil(t, err)
 	assert.Equal(t, string(dt), "data3")
-	assert.Equal(t, src4.Root(), src3.Root())
+	assert.Equal(t, src4.Root().Path(), src3.Root().Path())
 	assert.Nil(t, src4.Close())
 
 	s, err = fscache.DiskUsage()
diff --git a/builder/remotecontext/archive.go b/builder/remotecontext/archive.go
index f48cafe..b62d9dd 100644
--- a/builder/remotecontext/archive.go
+++ b/builder/remotecontext/archive.go
@@ -8,19 +8,19 @@
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/tarsum"
 	"github.com/pkg/errors"
 )
 
 type archiveContext struct {
-	root string
+	root containerfs.ContainerFS
 	sums tarsum.FileInfoSums
 }
 
 func (c *archiveContext) Close() error {
-	return os.RemoveAll(c.root)
+	return c.root.RemoveAll(c.root.Path())
 }
 
 func convertPathError(err error, cleanpath string) error {
@@ -52,7 +52,8 @@
 		return nil, err
 	}
 
-	tsc := &archiveContext{root: root}
+	// Assume local file system. Since it's coming from a tar file.
+	tsc := &archiveContext{root: containerfs.NewLocalContainerFS(root)}
 
 	// Make sure we clean-up upon error.  In the happy case the caller
 	// is expected to manage the clean-up
@@ -82,7 +83,7 @@
 	return tsc, nil
 }
 
-func (c *archiveContext) Root() string {
+func (c *archiveContext) Root() containerfs.ContainerFS {
 	return c.root
 }
 
@@ -91,7 +92,7 @@
 	if err != nil {
 		return err
 	}
-	return os.RemoveAll(fullpath)
+	return c.root.RemoveAll(fullpath)
 }
 
 func (c *archiveContext) Hash(path string) (string, error) {
@@ -100,7 +101,7 @@
 		return "", err
 	}
 
-	rel, err := filepath.Rel(c.root, fullpath)
+	rel, err := c.root.Rel(c.root.Path(), fullpath)
 	if err != nil {
 		return "", convertPathError(err, cleanpath)
 	}
@@ -115,14 +116,11 @@
 	return path, nil // backwards compat TODO: see if really needed
 }
 
-func normalize(path, root string) (cleanPath, fullPath string, err error) {
-	cleanPath = filepath.Clean(string(os.PathSeparator) + path)[1:]
-	fullPath, err = symlink.FollowSymlinkInScope(filepath.Join(root, path), root)
+func normalize(path string, root containerfs.ContainerFS) (cleanPath, fullPath string, err error) {
+	cleanPath = root.Clean(string(root.Separator()) + path)[1:]
+	fullPath, err = root.ResolveScopedPath(path, true)
 	if err != nil {
 		return "", "", errors.Wrapf(err, "forbidden path outside the build context: %s (%s)", path, cleanPath)
 	}
-	if _, err := os.Lstat(fullPath); err != nil {
-		return "", "", errors.WithStack(convertPathError(err, path))
-	}
 	return
 }
diff --git a/builder/remotecontext/detect.go b/builder/remotecontext/detect.go
index ec32dbe..38aff67 100644
--- a/builder/remotecontext/detect.go
+++ b/builder/remotecontext/detect.go
@@ -5,15 +5,14 @@
 	"fmt"
 	"io"
 	"os"
-	"path/filepath"
 	"strings"
 
+	"github.com/containerd/continuity/driver"
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builder/dockerfile/parser"
 	"github.com/docker/docker/builder/dockerignore"
 	"github.com/docker/docker/pkg/fileutils"
-	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/urlutil"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -157,12 +156,12 @@
 	return parser.Parse(br)
 }
 
-func openAt(remote builder.Source, path string) (*os.File, error) {
+func openAt(remote builder.Source, path string) (driver.File, error) {
 	fullPath, err := FullPath(remote, path)
 	if err != nil {
 		return nil, err
 	}
-	return os.Open(fullPath)
+	return remote.Root().Open(fullPath)
 }
 
 // StatAt is a helper for calling Stat on a path from a source
@@ -171,12 +170,12 @@
 	if err != nil {
 		return nil, err
 	}
-	return os.Stat(fullPath)
+	return remote.Root().Stat(fullPath)
 }
 
 // FullPath is a helper for getting a full path for a path from a source
 func FullPath(remote builder.Source, path string) (string, error) {
-	fullPath, err := symlink.FollowSymlinkInScope(filepath.Join(remote.Root(), path), remote.Root())
+	fullPath, err := remote.Root().ResolveScopedPath(path, true)
 	if err != nil {
 		return "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullPath) // backwards compat with old error
 	}
diff --git a/builder/remotecontext/detect_test.go b/builder/remotecontext/detect_test.go
index 6b47ac2..3d1ebd1 100644
--- a/builder/remotecontext/detect_test.go
+++ b/builder/remotecontext/detect_test.go
@@ -5,11 +5,11 @@
 	"io/ioutil"
 	"log"
 	"os"
-	"path/filepath"
 	"sort"
 	"testing"
 
 	"github.com/docker/docker/builder"
+	"github.com/docker/docker/pkg/containerfs"
 )
 
 const (
@@ -21,7 +21,7 @@
 const shouldStayFilename = "should_stay"
 
 func extractFilenames(files []os.FileInfo) []string {
-	filenames := make([]string, len(files), len(files))
+	filenames := make([]string, len(files))
 
 	for i, file := range files {
 		filenames[i] = file.Name()
@@ -53,7 +53,7 @@
 }
 
 func executeProcess(t *testing.T, contextDir string) {
-	modifiableCtx := &stubRemote{root: contextDir}
+	modifiableCtx := &stubRemote{root: containerfs.NewLocalContainerFS(contextDir)}
 
 	err := removeDockerfile(modifiableCtx, builder.DefaultDockerfileName)
 
@@ -105,19 +105,19 @@
 
 // TODO: remove after moving to a separate pkg
 type stubRemote struct {
-	root string
+	root containerfs.ContainerFS
 }
 
 func (r *stubRemote) Hash(path string) (string, error) {
 	return "", errors.New("not implemented")
 }
 
-func (r *stubRemote) Root() string {
+func (r *stubRemote) Root() containerfs.ContainerFS {
 	return r.root
 }
 func (r *stubRemote) Close() error {
 	return errors.New("not implemented")
 }
 func (r *stubRemote) Remove(p string) error {
-	return os.Remove(filepath.Join(r.root, p))
+	return r.root.Remove(r.root.Join(r.root.Path(), p))
 }
diff --git a/builder/remotecontext/git.go b/builder/remotecontext/git.go
index 158bb5a..f6fc0bc 100644
--- a/builder/remotecontext/git.go
+++ b/builder/remotecontext/git.go
@@ -6,6 +6,7 @@
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/builder/remotecontext/git"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/sirupsen/logrus"
 )
 
 // MakeGitContext returns a Context from gitURL that is cloned in a temporary directory.
@@ -21,9 +22,14 @@
 	}
 
 	defer func() {
-		// TODO: print errors?
-		c.Close()
-		os.RemoveAll(root)
+		err := c.Close()
+		if err != nil {
+			logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while closing git context")
+		}
+		err = os.RemoveAll(root)
+		if err != nil {
+			logrus.WithField("action", "MakeGitContext").WithField("module", "builder").WithField("url", gitURL).WithError(err).Error("error while removing path and children of root")
+		}
 	}()
 	return FromArchive(c)
 }
diff --git a/builder/remotecontext/lazycontext.go b/builder/remotecontext/lazycontext.go
index b29c413..08b8058 100644
--- a/builder/remotecontext/lazycontext.go
+++ b/builder/remotecontext/lazycontext.go
@@ -3,11 +3,10 @@
 import (
 	"encoding/hex"
 	"os"
-	"path/filepath"
-	"runtime"
 	"strings"
 
 	"github.com/docker/docker/builder"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/pools"
 	"github.com/pkg/errors"
 )
@@ -15,7 +14,7 @@
 // NewLazySource creates a new LazyContext. LazyContext defines a hashed build
 // context based on a root directory. Individual files are hashed first time
 // they are asked. It is not safe to call methods of LazyContext concurrently.
-func NewLazySource(root string) (builder.Source, error) {
+func NewLazySource(root containerfs.ContainerFS) (builder.Source, error) {
 	return &lazySource{
 		root: root,
 		sums: make(map[string]string),
@@ -23,11 +22,11 @@
 }
 
 type lazySource struct {
-	root string
+	root containerfs.ContainerFS
 	sums map[string]string
 }
 
-func (c *lazySource) Root() string {
+func (c *lazySource) Root() containerfs.ContainerFS {
 	return c.root
 }
 
@@ -41,16 +40,18 @@
 		return "", err
 	}
 
-	fi, err := os.Lstat(fullPath)
-	if err != nil {
-		return "", errors.WithStack(err)
-	}
-
 	relPath, err := Rel(c.root, fullPath)
 	if err != nil {
 		return "", errors.WithStack(convertPathError(err, cleanPath))
 	}
 
+	fi, err := os.Lstat(fullPath)
+	if err != nil {
+		// Backwards compatibility: a missing file returns a path as hash.
+		// This is reached in the case of a broken symlink.
+		return relPath, nil
+	}
+
 	sum, ok := c.sums[relPath]
 	if !ok {
 		sum, err = c.prepareHash(relPath, fi)
@@ -63,13 +64,13 @@
 }
 
 func (c *lazySource) prepareHash(relPath string, fi os.FileInfo) (string, error) {
-	p := filepath.Join(c.root, relPath)
+	p := c.root.Join(c.root.Path(), relPath)
 	h, err := NewFileHash(p, relPath, fi)
 	if err != nil {
 		return "", errors.Wrapf(err, "failed to create hash for %s", relPath)
 	}
 	if fi.Mode().IsRegular() && fi.Size() > 0 {
-		f, err := os.Open(p)
+		f, err := c.root.Open(p)
 		if err != nil {
 			return "", errors.Wrapf(err, "failed to open %s", relPath)
 		}
@@ -85,10 +86,10 @@
 
 // Rel makes a path relative to base path. Same as `filepath.Rel` but can also
 // handle UUID paths in windows.
-func Rel(basepath, targpath string) (string, error) {
+func Rel(basepath containerfs.ContainerFS, targpath string) (string, error) {
 	// filepath.Rel can't handle UUID paths in windows
-	if runtime.GOOS == "windows" {
-		pfx := basepath + `\`
+	if basepath.OS() == "windows" {
+		pfx := basepath.Path() + `\`
 		if strings.HasPrefix(targpath, pfx) {
 			p := strings.TrimPrefix(targpath, pfx)
 			if p == "" {
@@ -97,5 +98,5 @@
 			return p, nil
 		}
 	}
-	return filepath.Rel(basepath, targpath)
+	return basepath.Rel(basepath.Path(), targpath)
 }
diff --git a/builder/remotecontext/remote.go b/builder/remotecontext/remote.go
index 6733ff9..6c4073b 100644
--- a/builder/remotecontext/remote.go
+++ b/builder/remotecontext/remote.go
@@ -116,7 +116,7 @@
 		plen = maxPreambleLength
 	}
 
-	preamble := make([]byte, plen, plen)
+	preamble := make([]byte, plen)
 	rlen, err := r.Read(preamble)
 	if rlen == 0 {
 		return ct, ioutil.NopCloser(r), errors.New("empty response")
diff --git a/builder/remotecontext/tarsum.go b/builder/remotecontext/tarsum.go
index 3ae9d82..78f7470 100644
--- a/builder/remotecontext/tarsum.go
+++ b/builder/remotecontext/tarsum.go
@@ -1,25 +1,24 @@
 package remotecontext
 
 import (
-	"fmt"
 	"os"
-	"path/filepath"
 	"sync"
 
-	"github.com/docker/docker/pkg/symlink"
+	"github.com/docker/docker/pkg/containerfs"
 	iradix "github.com/hashicorp/go-immutable-radix"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"github.com/tonistiigi/fsutil"
 )
 
 type hashed interface {
-	Hash() string
+	Digest() digest.Digest
 }
 
 // CachableSource is a source that contains cache records for its contents
 type CachableSource struct {
 	mu   sync.Mutex
-	root string
+	root containerfs.ContainerFS
 	tree *iradix.Tree
 	txn  *iradix.Txn
 }
@@ -28,7 +27,7 @@
 func NewCachableSource(root string) *CachableSource {
 	ts := &CachableSource{
 		tree: iradix.New(),
-		root: root,
+		root: containerfs.NewLocalContainerFS(root),
 	}
 	return ts
 }
@@ -67,7 +66,7 @@
 		return err
 	}
 	txn := iradix.New().Txn()
-	err = filepath.Walk(cs.root, func(path string, info os.FileInfo, err error) error {
+	err = cs.root.Walk(cs.root.Path(), func(path string, info os.FileInfo, err error) error {
 		if err != nil {
 			return errors.Wrapf(err, "failed to walk %s", path)
 		}
@@ -110,7 +109,7 @@
 	}
 
 	hfi := &fileInfo{
-		sum: h.Hash(),
+		sum: h.Digest().Hex(),
 	}
 	cs.txn.Insert([]byte(p), hfi)
 	cs.mu.Unlock()
@@ -133,35 +132,19 @@
 	return nil
 }
 
-func (cs *CachableSource) normalize(path string) (cleanpath, fullpath string, err error) {
-	cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:]
-	fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(cs.root, path), cs.root)
-	if err != nil {
-		return "", "", fmt.Errorf("Forbidden path outside the context: %s (%s)", path, fullpath)
-	}
-	_, err = os.Lstat(fullpath)
-	if err != nil {
-		return "", "", convertPathError(err, path)
-	}
-	return
-}
-
 // Hash returns a hash for a single file in the source
 func (cs *CachableSource) Hash(path string) (string, error) {
 	n := cs.getRoot()
-	sum := ""
 	// TODO: check this for symlinks
 	v, ok := n.Get([]byte(path))
 	if !ok {
-		sum = path
-	} else {
-		sum = v.(*fileInfo).sum
+		return path, nil
 	}
-	return sum, nil
+	return v.(*fileInfo).sum, nil
 }
 
 // Root returns a root directory for the source
-func (cs *CachableSource) Root() string {
+func (cs *CachableSource) Root() containerfs.ContainerFS {
 	return cs.root
 }
 
diff --git a/builder/remotecontext/tarsum.pb.go b/builder/remotecontext/tarsum.pb.go
index 561a7f6..1d23bbe 100644
--- a/builder/remotecontext/tarsum.pb.go
+++ b/builder/remotecontext/tarsum.pb.go
@@ -94,7 +94,7 @@
 	s := make([]string, 0, 5)
 	s = append(s, "&remotecontext.TarsumBackup{")
 	keysForHashes := make([]string, 0, len(this.Hashes))
-	for k, _ := range this.Hashes {
+	for k := range this.Hashes {
 		keysForHashes = append(keysForHashes, k)
 	}
 	github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)
@@ -133,7 +133,7 @@
 	var l int
 	_ = l
 	if len(m.Hashes) > 0 {
-		for k, _ := range m.Hashes {
+		for k := range m.Hashes {
 			dAtA[i] = 0xa
 			i++
 			v := m.Hashes[k]
@@ -211,7 +211,7 @@
 		return "nil"
 	}
 	keysForHashes := make([]string, 0, len(this.Hashes))
-	for k, _ := range this.Hashes {
+	for k := range this.Hashes {
 		keysForHashes = append(keysForHashes, k)
 	}
 	github_com_gogo_protobuf_sortkeys.Strings(keysForHashes)
diff --git a/builder/remotecontext/tarsum_test.go b/builder/remotecontext/tarsum_test.go
index 8a9d69b..9395460 100644
--- a/builder/remotecontext/tarsum_test.go
+++ b/builder/remotecontext/tarsum_test.go
@@ -35,7 +35,7 @@
 		t.Fatalf("Error while executing Close: %s", err)
 	}
 
-	_, err = os.Stat(src.Root())
+	_, err = os.Stat(src.Root().Path())
 
 	if !os.IsNotExist(err) {
 		t.Fatal("Directory should not exist at this point")
@@ -104,17 +104,6 @@
 	}
 }
 
-func TestStatNotExisting(t *testing.T) {
-	contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test")
-	defer cleanup()
-
-	src := makeTestArchiveContext(t, contextDir)
-	_, err := src.Hash("not-existing")
-	if !os.IsNotExist(errors.Cause(err)) {
-		t.Fatalf("This file should not exist: %s", err)
-	}
-}
-
 func TestRemoveDirectory(t *testing.T) {
 	contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test")
 	defer cleanup()
@@ -129,17 +118,20 @@
 
 	src := makeTestArchiveContext(t, contextDir)
 
-	tarSum := src.(modifiableContext)
+	_, err = src.Root().Stat(src.Root().Join(src.Root().Path(), relativePath))
+	if err != nil {
+		t.Fatalf("Statting %s shouldn't fail: %+v", relativePath, err)
+	}
 
+	tarSum := src.(modifiableContext)
 	err = tarSum.Remove(relativePath)
 	if err != nil {
 		t.Fatalf("Error when executing Remove: %s", err)
 	}
 
-	_, err = src.Hash(contextSubdir)
-
+	_, err = src.Root().Stat(src.Root().Join(src.Root().Path(), relativePath))
 	if !os.IsNotExist(errors.Cause(err)) {
-		t.Fatal("Directory should not exist at this point")
+		t.Fatalf("Directory should not exist at this point: %+v ", err)
 	}
 }
 
diff --git a/client/checkpoint_list.go b/client/checkpoint_list.go
index ffe44bc..9835bad 100644
--- a/client/checkpoint_list.go
+++ b/client/checkpoint_list.go
@@ -2,7 +2,6 @@
 
 import (
 	"encoding/json"
-	"net/http"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
@@ -20,10 +19,7 @@
 
 	resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil)
 	if err != nil {
-		if resp.statusCode == http.StatusNotFound {
-			return checkpoints, containerNotFoundError{container}
-		}
-		return checkpoints, err
+		return checkpoints, wrapResponseError(err, resp, "container", container)
 	}
 
 	err = json.NewDecoder(resp.body).Decode(&checkpoints)
diff --git a/client/checkpoint_list_test.go b/client/checkpoint_list_test.go
index 3884657..11426ed 100644
--- a/client/checkpoint_list_test.go
+++ b/client/checkpoint_list_test.go
@@ -62,7 +62,7 @@
 	}
 
 	_, err := client.CheckpointList(context.Background(), "unknown", types.CheckpointListOptions{})
-	if err == nil || !IsErrContainerNotFound(err) {
+	if err == nil || !IsErrNotFound(err) {
 		t.Fatalf("expected a containerNotFound error, got %v", err)
 	}
 }
diff --git a/client/client.go b/client/client.go
index f7a8c07..8931248 100644
--- a/client/client.go
+++ b/client/client.go
@@ -1,10 +1,6 @@
 /*
 Package client is a Go client for the Docker Engine API.
 
-The "docker" command uses this package to communicate with the daemon. It can also
-be used by your own Go applications to do anything the command-line interface does
-- running containers, pulling images, managing swarms, etc.
-
 For more information about the Engine API, see the documentation:
 https://docs.docker.com/engine/reference/api/
 
@@ -160,7 +156,7 @@
 // highly recommended that you set a version or your client may break if the
 // server is upgraded.
 func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
-	proto, addr, basePath, err := ParseHost(host)
+	hostURL, err := ParseHostURL(host)
 	if err != nil {
 		return nil, err
 	}
@@ -171,7 +167,7 @@
 		}
 	} else {
 		transport := new(http.Transport)
-		sockets.ConfigureTransport(transport, proto, addr)
+		sockets.ConfigureTransport(transport, hostURL.Scheme, hostURL.Host)
 		client = &http.Client{
 			Transport:     transport,
 			CheckRedirect: CheckRedirect,
@@ -189,28 +185,24 @@
 		scheme = "https"
 	}
 
+	// TODO: store URL instead of proto/addr/basePath
 	return &Client{
 		scheme:            scheme,
 		host:              host,
-		proto:             proto,
-		addr:              addr,
-		basePath:          basePath,
+		proto:             hostURL.Scheme,
+		addr:              hostURL.Host,
+		basePath:          hostURL.Path,
 		client:            client,
 		version:           version,
 		customHTTPHeaders: httpHeaders,
 	}, nil
 }
 
-// Close ensures that transport.Client is closed
-// especially needed while using NewClient with *http.Client = nil
-// for example
-// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"})
+// Close the transport used by the client
 func (cli *Client) Close() error {
-
 	if t, ok := cli.client.Transport.(*http.Transport); ok {
 		t.CloseIdleConnections()
 	}
-
 	return nil
 }
 
@@ -220,37 +212,27 @@
 	var apiPath string
 	if cli.version != "" {
 		v := strings.TrimPrefix(cli.version, "v")
-		apiPath = path.Join(cli.basePath, "/v"+v+p)
+		apiPath = path.Join(cli.basePath, "/v"+v, p)
 	} else {
 		apiPath = path.Join(cli.basePath, p)
 	}
-
-	u := &url.URL{
-		Path: apiPath,
-	}
-	if len(query) > 0 {
-		u.RawQuery = query.Encode()
-	}
-	return u.String()
+	return (&url.URL{Path: apiPath, RawQuery: query.Encode()}).String()
 }
 
-// ClientVersion returns the version string associated with this
-// instance of the Client. Note that this value can be changed
-// via the DOCKER_API_VERSION env var.
-// This operation doesn't acquire a mutex.
+// ClientVersion returns the API version used by this client.
 func (cli *Client) ClientVersion() string {
 	return cli.version
 }
 
-// NegotiateAPIVersion updates the version string associated with this
-// instance of the Client to match the latest version the server supports
+// NegotiateAPIVersion queries the API and updates the version to match the
+// API version. Any errors are silently ignored.
 func (cli *Client) NegotiateAPIVersion(ctx context.Context) {
 	ping, _ := cli.Ping(ctx)
 	cli.NegotiateAPIVersionPing(ping)
 }
 
-// NegotiateAPIVersionPing updates the version string associated with this
-// instance of the Client to match the latest version the server supports
+// NegotiateAPIVersionPing updates the client version to match the Ping.APIVersion
+// if the ping version is less than the default version.
 func (cli *Client) NegotiateAPIVersionPing(p types.Ping) {
 	if cli.manualOverride {
 		return
@@ -266,23 +248,34 @@
 		cli.version = api.DefaultVersion
 	}
 
-	// if server version is lower than the maximum version supported by the Client, downgrade
-	if versions.LessThan(p.APIVersion, api.DefaultVersion) {
+	// if server version is lower than the client version, downgrade
+	if versions.LessThan(p.APIVersion, cli.version) {
 		cli.version = p.APIVersion
 	}
 }
 
-// DaemonHost returns the host associated with this instance of the Client.
-// This operation doesn't acquire a mutex.
+// DaemonHost returns the host address used by the client
 func (cli *Client) DaemonHost() string {
 	return cli.host
 }
 
-// ParseHost verifies that the given host strings is valid.
+// ParseHost parses a url string, validates the strings is a host url, and returns
+// the parsed host as: protocol, address, and base path
+// Deprecated: use ParseHostURL
 func ParseHost(host string) (string, string, string, error) {
+	hostURL, err := ParseHostURL(host)
+	if err != nil {
+		return "", "", "", err
+	}
+	return hostURL.Scheme, hostURL.Host, hostURL.Path, nil
+}
+
+// ParseHostURL parses a url string, validates the string is a host url, and
+// returns the parsed URL
+func ParseHostURL(host string) (*url.URL, error) {
 	protoAddrParts := strings.SplitN(host, "://", 2)
 	if len(protoAddrParts) == 1 {
-		return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
+		return nil, fmt.Errorf("unable to parse docker host `%s`", host)
 	}
 
 	var basePath string
@@ -290,16 +283,19 @@
 	if proto == "tcp" {
 		parsed, err := url.Parse("tcp://" + addr)
 		if err != nil {
-			return "", "", "", err
+			return nil, err
 		}
 		addr = parsed.Host
 		basePath = parsed.Path
 	}
-	return proto, addr, basePath, nil
+	return &url.URL{
+		Scheme: proto,
+		Host:   addr,
+		Path:   basePath,
+	}, nil
 }
 
-// CustomHTTPHeaders returns the custom http headers associated with this
-// instance of the Client. This operation doesn't acquire a mutex.
+// CustomHTTPHeaders returns the custom http headers stored by the client.
 func (cli *Client) CustomHTTPHeaders() map[string]string {
 	m := make(map[string]string)
 	for k, v := range cli.customHTTPHeaders {
@@ -308,8 +304,7 @@
 	return m
 }
 
-// SetCustomHTTPHeaders updates the custom http headers associated with this
-// instance of the Client. This operation doesn't acquire a mutex.
+// SetCustomHTTPHeaders that will be set on every HTTP request made by the client.
 func (cli *Client) SetCustomHTTPHeaders(headers map[string]string) {
 	cli.customHTTPHeaders = headers
 }
diff --git a/client/client_test.go b/client/client_test.go
index bc911c0..cb38d4d 100644
--- a/client/client_test.go
+++ b/client/client_test.go
@@ -11,6 +11,7 @@
 
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/internal/testutil"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -104,11 +105,11 @@
 }
 
 func TestGetAPIPath(t *testing.T) {
-	cases := []struct {
-		v string
-		p string
-		q url.Values
-		e string
+	testcases := []struct {
+		version  string
+		path     string
+		query    url.Values
+		expected string
 	}{
 		{"", "/containers/json", nil, "/containers/json"},
 		{"", "/containers/json", url.Values{}, "/containers/json"},
@@ -122,16 +123,10 @@
 		{"v1.22", "/networks/kiwl$%^", nil, "/v1.22/networks/kiwl$%25%5E"},
 	}
 
-	for _, cs := range cases {
-		c, err := NewClient("unix:///var/run/docker.sock", cs.v, nil, nil)
-		if err != nil {
-			t.Fatal(err)
-		}
-		g := c.getAPIPath(cs.p, cs.q)
-		assert.Equal(t, g, cs.e)
-
-		err = c.Close()
-		assert.NoError(t, err)
+	for _, testcase := range testcases {
+		c := Client{version: testcase.version, basePath: "/"}
+		actual := c.getAPIPath(testcase.path, testcase.query)
+		assert.Equal(t, actual, testcase.expected)
 	}
 }
 
@@ -152,7 +147,6 @@
 
 	for _, cs := range cases {
 		p, a, b, e := ParseHost(cs.host)
-		// if we expected an error to be returned...
 		if cs.err {
 			assert.Error(t, e)
 		}
@@ -162,6 +156,43 @@
 	}
 }
 
+func TestParseHostURL(t *testing.T) {
+	testcases := []struct {
+		host        string
+		expected    *url.URL
+		expectedErr string
+	}{
+		{
+			host:        "",
+			expectedErr: "unable to parse docker host",
+		},
+		{
+			host:        "foobar",
+			expectedErr: "unable to parse docker host",
+		},
+		{
+			host:     "foo://bar",
+			expected: &url.URL{Scheme: "foo", Host: "bar"},
+		},
+		{
+			host:     "tcp://localhost:2476",
+			expected: &url.URL{Scheme: "tcp", Host: "localhost:2476"},
+		},
+		{
+			host:     "tcp://localhost:2476/path",
+			expected: &url.URL{Scheme: "tcp", Host: "localhost:2476", Path: "/path"},
+		},
+	}
+
+	for _, testcase := range testcases {
+		actual, err := ParseHostURL(testcase.host)
+		if testcase.expectedErr != "" {
+			testutil.ErrorContains(t, err, testcase.expectedErr)
+		}
+		assert.Equal(t, testcase.expected, actual)
+	}
+}
+
 func TestNewEnvClientSetsDefaultVersion(t *testing.T) {
 	env := envToMap()
 	defer mapToEnv(env)
@@ -245,6 +276,14 @@
 	// test downgrade
 	client.NegotiateAPIVersionPing(ping)
 	assert.Equal(t, expected, client.version)
+
+	// set the client version to something older, and verify that we keep the
+	// original setting.
+	expected = "1.20"
+	client.version = expected
+	client.NegotiateAPIVersionPing(ping)
+	assert.Equal(t, expected, client.version)
+
 }
 
 // TestNegotiateAPIVersionOverride asserts that we honor
diff --git a/client/client_unix.go b/client/client_unix.go
index 89de892..eba8d90 100644
--- a/client/client_unix.go
+++ b/client/client_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris openbsd darwin
+// +build linux freebsd openbsd darwin
 
 package client
 
diff --git a/client/config_inspect.go b/client/config_inspect.go
index ebb6d63..b44d6fd 100644
--- a/client/config_inspect.go
+++ b/client/config_inspect.go
@@ -4,7 +4,6 @@
 	"bytes"
 	"encoding/json"
 	"io/ioutil"
-	"net/http"
 
 	"github.com/docker/docker/api/types/swarm"
 	"golang.org/x/net/context"
@@ -17,10 +16,7 @@
 	}
 	resp, err := cli.get(ctx, "/configs/"+id, nil, nil)
 	if err != nil {
-		if resp.statusCode == http.StatusNotFound {
-			return swarm.Config{}, nil, configNotFoundError{id}
-		}
-		return swarm.Config{}, nil, err
+		return swarm.Config{}, nil, wrapResponseError(err, resp, "config", id)
 	}
 	defer ensureReaderClosed(resp)
 
diff --git a/client/config_inspect_test.go b/client/config_inspect_test.go
index 010b188..fab0c28 100644
--- a/client/config_inspect_test.go
+++ b/client/config_inspect_test.go
@@ -42,7 +42,7 @@
 	}
 
 	_, _, err := client.ConfigInspectWithRaw(context.Background(), "unknown")
-	if err == nil || !IsErrConfigNotFound(err) {
+	if err == nil || !IsErrNotFound(err) {
 		t.Fatalf("expected a configNotFoundError error, got %v", err)
 	}
 }
@@ -53,7 +53,7 @@
 		version: "1.30",
 		client: newMockClient(func(req *http.Request) (*http.Response, error) {
 			if !strings.HasPrefix(req.URL.Path, expectedURL) {
-				return nil, fmt.Errorf("Expected URL '%s', got '%s'", expectedURL, req.URL)
+				return nil, fmt.Errorf("expected URL '%s', got '%s'", expectedURL, req.URL)
 			}
 			content, err := json.Marshal(swarm.Config{
 				ID: "config_id",
diff --git a/client/config_list.go b/client/config_list.go
index 8483ca1..57febc9 100644
--- a/client/config_list.go
+++ b/client/config_list.go
@@ -18,7 +18,7 @@
 	query := url.Values{}
 
 	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToParam(options.Filters)
+		filterJSON, err := filters.ToJSON(options.Filters)
 		if err != nil {
 			return nil, err
 		}
diff --git a/client/config_remove.go b/client/config_remove.go
index 726b5c8..e025d44 100644
--- a/client/config_remove.go
+++ b/client/config_remove.go
@@ -9,5 +9,5 @@
 	}
 	resp, err := cli.delete(ctx, "/configs/"+id, nil, nil)
 	ensureReaderClosed(resp)
-	return err
+	return wrapResponseError(err, resp, "config", id)
 }
diff --git a/client/container_commit.go b/client/container_commit.go
index 531d796..b3b16ab 100644
--- a/client/container_commit.go
+++ b/client/container_commit.go
@@ -39,7 +39,7 @@
 	for _, change := range options.Changes {
 		query.Add("changes", change)
 	}
-	if options.Pause != true {
+	if !options.Pause {
 		query.Set("pause", "0")
 	}
 
diff --git a/client/container_create.go b/client/container_create.go
index 6841b0b..bd817e7 100644
--- a/client/container_create.go
+++ b/client/container_create.go
@@ -45,7 +45,7 @@
 	serverResp, err := cli.post(ctx, "/containers/create", query, body, nil)
 	if err != nil {
 		if serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") {
-			return response, imageNotFoundError{config.Image}
+			return response, objectNotFoundError{object: "image", id: config.Image}
 		}
 		return response, err
 	}
diff --git a/client/container_create_test.go b/client/container_create_test.go
index 3ab608c..9464dc6 100644
--- a/client/container_create_test.go
+++ b/client/container_create_test.go
@@ -37,7 +37,7 @@
 		client: newMockClient(errorMock(http.StatusNotFound, "No such image")),
 	}
 	_, err := client.ContainerCreate(context.Background(), &container.Config{Image: "unknown_image"}, nil, nil, "unknown")
-	if err == nil || !IsErrImageNotFound(err) {
+	if err == nil || !IsErrNotFound(err) {
 		t.Fatalf("expected an imageNotFound error, got %v", err)
 	}
 }
diff --git a/client/container_exec.go b/client/container_exec.go
index 0665c54..29670d0 100644
--- a/client/container_exec.go
+++ b/client/container_exec.go
@@ -35,7 +35,7 @@
 // It returns a types.HijackedConnection with the hijacked connection
 // and the a reader to get output. It's up to the called to close
 // the hijacked connection by calling types.HijackedResponse.Close.
-func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) {
+func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error) {
 	headers := map[string][]string{"Content-Type": {"application/json"}}
 	return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers)
 }
diff --git a/client/container_inspect.go b/client/container_inspect.go
index 17f1809..a15db14 100644
--- a/client/container_inspect.go
+++ b/client/container_inspect.go
@@ -4,7 +4,6 @@
 	"bytes"
 	"encoding/json"
 	"io/ioutil"
-	"net/http"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
@@ -15,10 +14,7 @@
 func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) {
 	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil)
 	if err != nil {
-		if serverResp.statusCode == http.StatusNotFound {
-			return types.ContainerJSON{}, containerNotFoundError{containerID}
-		}
-		return types.ContainerJSON{}, err
+		return types.ContainerJSON{}, wrapResponseError(err, serverResp, "container", containerID)
 	}
 
 	var response types.ContainerJSON
@@ -35,10 +31,7 @@
 	}
 	serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil)
 	if err != nil {
-		if serverResp.statusCode == http.StatusNotFound {
-			return types.ContainerJSON{}, nil, containerNotFoundError{containerID}
-		}
-		return types.ContainerJSON{}, nil, err
+		return types.ContainerJSON{}, nil, wrapResponseError(err, serverResp, "container", containerID)
 	}
 	defer ensureReaderClosed(serverResp)
 
diff --git a/client/container_inspect_test.go b/client/container_inspect_test.go
index 98f83bd..37259e4 100644
--- a/client/container_inspect_test.go
+++ b/client/container_inspect_test.go
@@ -30,7 +30,7 @@
 	}
 
 	_, err := client.ContainerInspect(context.Background(), "unknown")
-	if err == nil || !IsErrContainerNotFound(err) {
+	if err == nil || !IsErrNotFound(err) {
 		t.Fatalf("expected a containerNotFound error, got %v", err)
 	}
 }
diff --git a/client/container_logs.go b/client/container_logs.go
index 0f32e9f..35c297c 100644
--- a/client/container_logs.go
+++ b/client/container_logs.go
@@ -51,6 +51,14 @@
 		query.Set("since", ts)
 	}
 
+	if options.Until != "" {
+		ts, err := timetypes.GetTimestamp(options.Until, time.Now())
+		if err != nil {
+			return nil, err
+		}
+		query.Set("until", ts)
+	}
+
 	if options.Timestamps {
 		query.Set("timestamps", "1")
 	}
diff --git a/client/container_logs_test.go b/client/container_logs_test.go
index 99e3184..8cb7635 100644
--- a/client/container_logs_test.go
+++ b/client/container_logs_test.go
@@ -13,6 +13,7 @@
 	"time"
 
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/internal/testutil"
 
 	"golang.org/x/net/context"
 )
@@ -28,9 +29,11 @@
 	_, err = client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{
 		Since: "2006-01-02TZ",
 	})
-	if err == nil || !strings.Contains(err.Error(), `parsing time "2006-01-02TZ"`) {
-		t.Fatalf("expected a 'parsing time' error, got %v", err)
-	}
+	testutil.ErrorContains(t, err, `parsing time "2006-01-02TZ"`)
+	_, err = client.ContainerLogs(context.Background(), "container_id", types.ContainerLogsOptions{
+		Until: "2006-01-02TZ",
+	})
+	testutil.ErrorContains(t, err, `parsing time "2006-01-02TZ"`)
 }
 
 func TestContainerLogs(t *testing.T) {
@@ -80,6 +83,17 @@
 				"since": "invalid but valid",
 			},
 		},
+		{
+			options: types.ContainerLogsOptions{
+				// An complete invalid date, timestamp or go duration will be
+				// passed as is
+				Until: "invalid but valid",
+			},
+			expectedQueryParams: map[string]string{
+				"tail":  "",
+				"until": "invalid but valid",
+			},
+		},
 	}
 	for _, logCase := range cases {
 		client := &Client{
diff --git a/client/container_remove.go b/client/container_remove.go
index 3a79590..070108b 100644
--- a/client/container_remove.go
+++ b/client/container_remove.go
@@ -23,5 +23,5 @@
 
 	resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil)
 	ensureReaderClosed(resp)
-	return err
+	return wrapResponseError(err, resp, "container", containerID)
 }
diff --git a/client/container_remove_test.go b/client/container_remove_test.go
index 798c08b..0eab7ee 100644
--- a/client/container_remove_test.go
+++ b/client/container_remove_test.go
@@ -9,6 +9,7 @@
 	"testing"
 
 	"github.com/docker/docker/api/types"
+	"github.com/stretchr/testify/assert"
 	"golang.org/x/net/context"
 )
 
@@ -17,9 +18,16 @@
 		client: newMockClient(errorMock(http.StatusInternalServerError, "Server error")),
 	}
 	err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{})
-	if err == nil || err.Error() != "Error response from daemon: Server error" {
-		t.Fatalf("expected a Server Error, got %v", err)
+	assert.EqualError(t, err, "Error response from daemon: Server error")
+}
+
+func TestContainerRemoveNotFoundError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusNotFound, "missing")),
 	}
+	err := client.ContainerRemove(context.Background(), "container_id", types.ContainerRemoveOptions{})
+	assert.EqualError(t, err, "Error: No such container: container_id")
+	assert.True(t, IsErrNotFound(err))
 }
 
 func TestContainerRemove(t *testing.T) {
@@ -53,7 +61,5 @@
 		RemoveVolumes: true,
 		Force:         true,
 	})
-	if err != nil {
-		t.Fatal(err)
-	}
+	assert.NoError(t, err)
 }
diff --git a/client/errors.go b/client/errors.go
index fc7df9f..e41b728 100644
--- a/client/errors.go
+++ b/client/errors.go
@@ -3,6 +3,8 @@
 import (
 	"fmt"
 
+	"net/http"
+
 	"github.com/docker/docker/api/types/versions"
 	"github.com/pkg/errors"
 )
@@ -36,95 +38,37 @@
 	NotFound() bool // Is the error a NotFound error
 }
 
-// IsErrNotFound returns true if the error is caused with an
-// object (image, container, network, volume, …) is not found in the docker host.
+// IsErrNotFound returns true if the error is a NotFound error, which is returned
+// by the API when some object is not found.
 func IsErrNotFound(err error) bool {
 	te, ok := err.(notFound)
 	return ok && te.NotFound()
 }
 
-// imageNotFoundError implements an error returned when an image is not in the docker host.
-type imageNotFoundError struct {
-	imageID string
+type objectNotFoundError struct {
+	object string
+	id     string
 }
 
-// NotFound indicates that this error type is of NotFound
-func (e imageNotFoundError) NotFound() bool {
+func (e objectNotFoundError) NotFound() bool {
 	return true
 }
 
-// Error returns a string representation of an imageNotFoundError
-func (e imageNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such image: %s", e.imageID)
+func (e objectNotFoundError) Error() string {
+	return fmt.Sprintf("Error: No such %s: %s", e.object, e.id)
 }
 
-// IsErrImageNotFound returns true if the error is caused
-// when an image is not found in the docker host.
-func IsErrImageNotFound(err error) bool {
-	return IsErrNotFound(err)
-}
-
-// containerNotFoundError implements an error returned when a container is not in the docker host.
-type containerNotFoundError struct {
-	containerID string
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e containerNotFoundError) NotFound() bool {
-	return true
-}
-
-// Error returns a string representation of a containerNotFoundError
-func (e containerNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such container: %s", e.containerID)
-}
-
-// IsErrContainerNotFound returns true if the error is caused
-// when a container is not found in the docker host.
-func IsErrContainerNotFound(err error) bool {
-	return IsErrNotFound(err)
-}
-
-// networkNotFoundError implements an error returned when a network is not in the docker host.
-type networkNotFoundError struct {
-	networkID string
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e networkNotFoundError) NotFound() bool {
-	return true
-}
-
-// Error returns a string representation of a networkNotFoundError
-func (e networkNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such network: %s", e.networkID)
-}
-
-// IsErrNetworkNotFound returns true if the error is caused
-// when a network is not found in the docker host.
-func IsErrNetworkNotFound(err error) bool {
-	return IsErrNotFound(err)
-}
-
-// volumeNotFoundError implements an error returned when a volume is not in the docker host.
-type volumeNotFoundError struct {
-	volumeID string
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e volumeNotFoundError) NotFound() bool {
-	return true
-}
-
-// Error returns a string representation of a volumeNotFoundError
-func (e volumeNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such volume: %s", e.volumeID)
-}
-
-// IsErrVolumeNotFound returns true if the error is caused
-// when a volume is not found in the docker host.
-func IsErrVolumeNotFound(err error) bool {
-	return IsErrNotFound(err)
+func wrapResponseError(err error, resp serverResponse, object, id string) error {
+	switch {
+	case err == nil:
+		return nil
+	case resp.statusCode == http.StatusNotFound:
+		return objectNotFoundError{object: object, id: id}
+	case resp.statusCode == http.StatusNotImplemented:
+		return notImplementedError{message: err.Error()}
+	default:
+		return err
+	}
 }
 
 // unauthorizedError represents an authorization error in a remote registry.
@@ -144,72 +88,6 @@
 	return ok
 }
 
-// nodeNotFoundError implements an error returned when a node is not found.
-type nodeNotFoundError struct {
-	nodeID string
-}
-
-// Error returns a string representation of a nodeNotFoundError
-func (e nodeNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such node: %s", e.nodeID)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e nodeNotFoundError) NotFound() bool {
-	return true
-}
-
-// IsErrNodeNotFound returns true if the error is caused
-// when a node is not found.
-func IsErrNodeNotFound(err error) bool {
-	_, ok := err.(nodeNotFoundError)
-	return ok
-}
-
-// serviceNotFoundError implements an error returned when a service is not found.
-type serviceNotFoundError struct {
-	serviceID string
-}
-
-// Error returns a string representation of a serviceNotFoundError
-func (e serviceNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such service: %s", e.serviceID)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e serviceNotFoundError) NotFound() bool {
-	return true
-}
-
-// IsErrServiceNotFound returns true if the error is caused
-// when a service is not found.
-func IsErrServiceNotFound(err error) bool {
-	_, ok := err.(serviceNotFoundError)
-	return ok
-}
-
-// taskNotFoundError implements an error returned when a task is not found.
-type taskNotFoundError struct {
-	taskID string
-}
-
-// Error returns a string representation of a taskNotFoundError
-func (e taskNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such task: %s", e.taskID)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e taskNotFoundError) NotFound() bool {
-	return true
-}
-
-// IsErrTaskNotFound returns true if the error is caused
-// when a task is not found.
-func IsErrTaskNotFound(err error) bool {
-	_, ok := err.(taskNotFoundError)
-	return ok
-}
-
 type pluginPermissionDenied struct {
 	name string
 }
@@ -225,6 +103,26 @@
 	return ok
 }
 
+type notImplementedError struct {
+	message string
+}
+
+func (e notImplementedError) Error() string {
+	return e.message
+}
+
+func (e notImplementedError) NotImplemented() bool {
+	return true
+}
+
+// IsErrNotImplemented returns true if the error is a NotImplemented error.
+// This is returned by the API when a requested feature has not been
+// implemented.
+func IsErrNotImplemented(err error) bool {
+	te, ok := err.(notImplementedError)
+	return ok && te.NotImplemented()
+}
+
 // NewVersionError returns an error if the APIVersion required
 // if less than the current supported version
 func (cli *Client) NewVersionError(APIrequired, feature string) error {
@@ -233,68 +131,3 @@
 	}
 	return nil
 }
-
-// secretNotFoundError implements an error returned when a secret is not found.
-type secretNotFoundError struct {
-	name string
-}
-
-// Error returns a string representation of a secretNotFoundError
-func (e secretNotFoundError) Error() string {
-	return fmt.Sprintf("Error: no such secret: %s", e.name)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e secretNotFoundError) NotFound() bool {
-	return true
-}
-
-// IsErrSecretNotFound returns true if the error is caused
-// when a secret is not found.
-func IsErrSecretNotFound(err error) bool {
-	_, ok := err.(secretNotFoundError)
-	return ok
-}
-
-// configNotFoundError implements an error returned when a config is not found.
-type configNotFoundError struct {
-	name string
-}
-
-// Error returns a string representation of a configNotFoundError
-func (e configNotFoundError) Error() string {
-	return fmt.Sprintf("Error: no such config: %s", e.name)
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e configNotFoundError) NotFound() bool {
-	return true
-}
-
-// IsErrConfigNotFound returns true if the error is caused
-// when a config is not found.
-func IsErrConfigNotFound(err error) bool {
-	_, ok := err.(configNotFoundError)
-	return ok
-}
-
-// pluginNotFoundError implements an error returned when a plugin is not in the docker host.
-type pluginNotFoundError struct {
-	name string
-}
-
-// NotFound indicates that this error type is of NotFound
-func (e pluginNotFoundError) NotFound() bool {
-	return true
-}
-
-// Error returns a string representation of a pluginNotFoundError
-func (e pluginNotFoundError) Error() string {
-	return fmt.Sprintf("Error: No such plugin: %s", e.name)
-}
-
-// IsErrPluginNotFound returns true if the error is caused
-// when a plugin is not found in the docker host.
-func IsErrPluginNotFound(err error) bool {
-	return IsErrNotFound(err)
-}
diff --git a/client/hijack.go b/client/hijack.go
index 8cf0119..d04cebd 100644
--- a/client/hijack.go
+++ b/client/hijack.go
@@ -12,7 +12,6 @@
 	"time"
 
 	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/pkg/tlsconfig"
 	"github.com/docker/go-connections/sockets"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
@@ -71,7 +70,7 @@
 	timeout := dialer.Timeout
 
 	if !dialer.Deadline.IsZero() {
-		deadlineTimeout := dialer.Deadline.Sub(time.Now())
+		deadlineTimeout := time.Until(dialer.Deadline)
 		if timeout == 0 || deadlineTimeout < timeout {
 			timeout = deadlineTimeout
 		}
@@ -115,7 +114,7 @@
 	// from the hostname we're connecting to.
 	if config.ServerName == "" {
 		// Make a copy to avoid polluting argument or default.
-		config = tlsconfig.Clone(config)
+		config = tlsConfigClone(config)
 		config.ServerName = hostname
 	}
 
diff --git a/client/image_build.go b/client/image_build.go
index 44a215f..cd0f54d 100644
--- a/client/image_build.go
+++ b/client/image_build.go
@@ -7,6 +7,7 @@
 	"net/http"
 	"net/url"
 	"strconv"
+	"strings"
 
 	"golang.org/x/net/context"
 
@@ -29,6 +30,13 @@
 		return types.ImageBuildResponse{}, err
 	}
 	headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf))
+
+	if options.Platform != "" {
+		if err := cli.NewVersionError("1.32", "platform"); err != nil {
+			return types.ImageBuildResponse{}, err
+		}
+		query.Set("platform", options.Platform)
+	}
 	headers.Set("Content-Type", "application/x-tar")
 
 	serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers)
@@ -123,6 +131,8 @@
 	if options.SessionID != "" {
 		query.Set("session", options.SessionID)
 	}
-
+	if options.Platform != "" {
+		query.Set("platform", strings.ToLower(options.Platform))
+	}
 	return query, nil
 }
diff --git a/client/image_create.go b/client/image_create.go
index 4436abb..fb5447b 100644
--- a/client/image_create.go
+++ b/client/image_create.go
@@ -3,6 +3,7 @@
 import (
 	"io"
 	"net/url"
+	"strings"
 
 	"golang.org/x/net/context"
 
@@ -21,6 +22,9 @@
 	query := url.Values{}
 	query.Set("fromImage", reference.FamiliarName(ref))
 	query.Set("tag", getAPITagFromNamedRef(ref))
+	if options.Platform != "" {
+		query.Set("platform", strings.ToLower(options.Platform))
+	}
 	resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
 	if err != nil {
 		return nil, err
diff --git a/client/image_import.go b/client/image_import.go
index d7dedd8..ab55ddb 100644
--- a/client/image_import.go
+++ b/client/image_import.go
@@ -3,6 +3,7 @@
 import (
 	"io"
 	"net/url"
+	"strings"
 
 	"golang.org/x/net/context"
 
@@ -25,6 +26,9 @@
 	query.Set("repo", ref)
 	query.Set("tag", options.Tag)
 	query.Set("message", options.Message)
+	if options.Platform != "" {
+		query.Set("platform", strings.ToLower(options.Platform))
+	}
 	for _, change := range options.Changes {
 		query.Add("changes", change)
 	}
diff --git a/client/image_inspect.go b/client/image_inspect.go
index b3a64ce..1bc5919 100644
--- a/client/image_inspect.go
+++ b/client/image_inspect.go
@@ -4,7 +4,6 @@
 	"bytes"
 	"encoding/json"
 	"io/ioutil"
-	"net/http"
 
 	"github.com/docker/docker/api/types"
 	"golang.org/x/net/context"
@@ -14,10 +13,7 @@
 func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) {
 	serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", nil, nil)
 	if err != nil {
-		if serverResp.statusCode == http.StatusNotFound {
-			return types.ImageInspect{}, nil, imageNotFoundError{imageID}
-		}
-		return types.ImageInspect{}, nil, err
+		return types.ImageInspect{}, nil, wrapResponseError(err, serverResp, "image", imageID)
 	}
 	defer ensureReaderClosed(serverResp)
 
diff --git a/client/image_inspect_test.go b/client/image_inspect_test.go
index 74a4e49..b5721f3 100644
--- a/client/image_inspect_test.go
+++ b/client/image_inspect_test.go
@@ -31,7 +31,7 @@
 	}
 
 	_, _, err := client.ImageInspectWithRaw(context.Background(), "unknown")
-	if err == nil || !IsErrImageNotFound(err) {
+	if err == nil || !IsErrNotFound(err) {
 		t.Fatalf("expected an imageNotFound error, got %v", err)
 	}
 }
diff --git a/client/image_pull.go b/client/image_pull.go
index a72b9bf..92942d2 100644
--- a/client/image_pull.go
+++ b/client/image_pull.go
@@ -4,6 +4,7 @@
 	"io"
 	"net/http"
 	"net/url"
+	"strings"
 
 	"golang.org/x/net/context"
 
@@ -30,6 +31,9 @@
 	if !options.All {
 		query.Set("tag", getAPITagFromNamedRef(ref))
 	}
+	if options.Platform != "" {
+		query.Set("platform", strings.ToLower(options.Platform))
+	}
 
 	resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth)
 	if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil {
diff --git a/client/image_remove.go b/client/image_remove.go
index 94e4b74..81d6c54 100644
--- a/client/image_remove.go
+++ b/client/image_remove.go
@@ -2,7 +2,6 @@
 
 import (
 	"encoding/json"
-	"net/http"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
@@ -20,15 +19,12 @@
 		query.Set("noprune", "1")
 	}
 
+	var dels []types.ImageDeleteResponseItem
 	resp, err := cli.delete(ctx, "/images/"+imageID, query, nil)
 	if err != nil {
-		if resp.statusCode == http.StatusNotFound {
-			return nil, imageNotFoundError{imageID}
-		}
-		return nil, err
+		return dels, wrapResponseError(err, resp, "image", imageID)
 	}
 
-	var dels []types.ImageDeleteResponseItem
 	err = json.NewDecoder(resp.body).Decode(&dels)
 	ensureReaderClosed(resp)
 	return dels, err
diff --git a/client/image_remove_test.go b/client/image_remove_test.go
index 5a391b0..6149db7 100644
--- a/client/image_remove_test.go
+++ b/client/image_remove_test.go
@@ -10,6 +10,7 @@
 	"testing"
 
 	"github.com/docker/docker/api/types"
+	"github.com/stretchr/testify/assert"
 	"golang.org/x/net/context"
 )
 
@@ -19,20 +20,17 @@
 	}
 
 	_, err := client.ImageRemove(context.Background(), "image_id", types.ImageRemoveOptions{})
-	if err == nil || err.Error() != "Error response from daemon: Server error" {
-		t.Fatalf("expected a Server Error, got %v", err)
-	}
+	assert.EqualError(t, err, "Error response from daemon: Server error")
 }
 
 func TestImageRemoveImageNotFound(t *testing.T) {
 	client := &Client{
-		client: newMockClient(errorMock(http.StatusNotFound, "Server error")),
+		client: newMockClient(errorMock(http.StatusNotFound, "missing")),
 	}
 
 	_, err := client.ImageRemove(context.Background(), "unknown", types.ImageRemoveOptions{})
-	if err == nil || !IsErrNotFound(err) {
-		t.Fatalf("expected an imageNotFoundError error, got %v", err)
-	}
+	assert.EqualError(t, err, "Error: No such image: unknown")
+	assert.True(t, IsErrNotFound(err))
 }
 
 func TestImageRemove(t *testing.T) {
diff --git a/client/image_search.go b/client/image_search.go
index b0fcd5c..5566e92 100644
--- a/client/image_search.go
+++ b/client/image_search.go
@@ -21,7 +21,7 @@
 	query.Set("limit", fmt.Sprintf("%d", options.Limit))
 
 	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToParam(options.Filters)
+		filterJSON, err := filters.ToJSON(options.Filters)
 		if err != nil {
 			return results, err
 		}
diff --git a/client/interface.go b/client/interface.go
index acd4de1..dd8b388 100644
--- a/client/interface.go
+++ b/client/interface.go
@@ -45,7 +45,7 @@
 	ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error)
 	ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error)
 	ContainerDiff(ctx context.Context, container string) ([]container.ContainerChangeResponseItem, error)
-	ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error)
+	ContainerExecAttach(ctx context.Context, execID string, config types.ExecStartCheck) (types.HijackedResponse, error)
 	ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error)
 	ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error)
 	ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error
diff --git a/client/network_inspect.go b/client/network_inspect.go
index 848c979..afabe65 100644
--- a/client/network_inspect.go
+++ b/client/network_inspect.go
@@ -4,7 +4,6 @@
 	"bytes"
 	"encoding/json"
 	"io/ioutil"
-	"net/http"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
@@ -33,10 +32,7 @@
 	}
 	resp, err = cli.get(ctx, "/networks/"+networkID, query, nil)
 	if err != nil {
-		if resp.statusCode == http.StatusNotFound {
-			return networkResource, nil, networkNotFoundError{networkID}
-		}
-		return networkResource, nil, err
+		return networkResource, nil, wrapResponseError(err, resp, "network", networkID)
 	}
 	defer ensureReaderClosed(resp)
 
diff --git a/client/network_inspect_test.go b/client/network_inspect_test.go
index 9bfb55d..56399c7 100644
--- a/client/network_inspect_test.go
+++ b/client/network_inspect_test.go
@@ -21,20 +21,17 @@
 	}
 
 	_, err := client.NetworkInspect(context.Background(), "nothing", types.NetworkInspectOptions{})
-	if err == nil || err.Error() != "Error response from daemon: Server error" {
-		t.Fatalf("expected a Server Error, got %v", err)
-	}
+	assert.EqualError(t, err, "Error response from daemon: Server error")
 }
 
-func TestNetworkInspectContainerNotFound(t *testing.T) {
+func TestNetworkInspectNotFoundError(t *testing.T) {
 	client := &Client{
-		client: newMockClient(errorMock(http.StatusNotFound, "Server error")),
+		client: newMockClient(errorMock(http.StatusNotFound, "missing")),
 	}
 
 	_, err := client.NetworkInspect(context.Background(), "unknown", types.NetworkInspectOptions{})
-	if err == nil || !IsErrNetworkNotFound(err) {
-		t.Fatalf("expected a networkNotFound error, got %v", err)
-	}
+	assert.EqualError(t, err, "Error: No such network: unknown")
+	assert.True(t, IsErrNotFound(err))
 }
 
 func TestNetworkInspect(t *testing.T) {
diff --git a/client/network_remove.go b/client/network_remove.go
index 6bd6748..0811b5b 100644
--- a/client/network_remove.go
+++ b/client/network_remove.go
@@ -6,5 +6,5 @@
 func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error {
 	resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil)
 	ensureReaderClosed(resp)
-	return err
+	return wrapResponseError(err, resp, "network", networkID)
 }
diff --git a/client/node_inspect.go b/client/node_inspect.go
index abf505d..791d2c0 100644
--- a/client/node_inspect.go
+++ b/client/node_inspect.go
@@ -4,7 +4,6 @@
 	"bytes"
 	"encoding/json"
 	"io/ioutil"
-	"net/http"
 
 	"github.com/docker/docker/api/types/swarm"
 	"golang.org/x/net/context"
@@ -14,10 +13,7 @@
 func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) {
 	serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil)
 	if err != nil {
-		if serverResp.statusCode == http.StatusNotFound {
-			return swarm.Node{}, nil, nodeNotFoundError{nodeID}
-		}
-		return swarm.Node{}, nil, err
+		return swarm.Node{}, nil, wrapResponseError(err, serverResp, "node", nodeID)
 	}
 	defer ensureReaderClosed(serverResp)
 
diff --git a/client/node_inspect_test.go b/client/node_inspect_test.go
index dca16a8..6260da5 100644
--- a/client/node_inspect_test.go
+++ b/client/node_inspect_test.go
@@ -30,7 +30,7 @@
 	}
 
 	_, _, err := client.NodeInspectWithRaw(context.Background(), "unknown")
-	if err == nil || !IsErrNodeNotFound(err) {
+	if err == nil || !IsErrNotFound(err) {
 		t.Fatalf("expected a nodeNotFoundError error, got %v", err)
 	}
 }
diff --git a/client/node_list.go b/client/node_list.go
index 3e8440f..fed2299 100644
--- a/client/node_list.go
+++ b/client/node_list.go
@@ -15,7 +15,7 @@
 	query := url.Values{}
 
 	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToParam(options.Filters)
+		filterJSON, err := filters.ToJSON(options.Filters)
 
 		if err != nil {
 			return nil, err
diff --git a/client/node_remove.go b/client/node_remove.go
index 0a77f3d..adbf52f 100644
--- a/client/node_remove.go
+++ b/client/node_remove.go
@@ -17,5 +17,5 @@
 
 	resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil)
 	ensureReaderClosed(resp)
-	return err
+	return wrapResponseError(err, resp, "node", nodeID)
 }
diff --git a/client/parse_logs.go b/client/parse_logs.go
deleted file mode 100644
index e427f80..0000000
--- a/client/parse_logs.go
+++ /dev/null
@@ -1,41 +0,0 @@
-package client
-
-// parse_logs.go contains utility helpers for getting information out of docker
-// log lines. really, it only contains ParseDetails right now. maybe in the
-// future there will be some desire to parse log messages back into a struct?
-// that would go here if we did
-
-import (
-	"net/url"
-	"strings"
-
-	"github.com/pkg/errors"
-)
-
-// ParseLogDetails takes a details string of key value pairs in the form
-// "k=v,l=w", where the keys and values are url query escaped, and each pair
-// is separated by a comma, returns a map. returns an error if the details
-// string is not in a valid format
-// the exact form of details encoding is implemented in
-// api/server/httputils/write_log_stream.go
-func ParseLogDetails(details string) (map[string]string, error) {
-	pairs := strings.Split(details, ",")
-	detailsMap := make(map[string]string, len(pairs))
-	for _, pair := range pairs {
-		p := strings.SplitN(pair, "=", 2)
-		// if there is no equals sign, we will only get 1 part back
-		if len(p) != 2 {
-			return nil, errors.New("invalid details format")
-		}
-		k, err := url.QueryUnescape(p[0])
-		if err != nil {
-			return nil, err
-		}
-		v, err := url.QueryUnescape(p[1])
-		if err != nil {
-			return nil, err
-		}
-		detailsMap[k] = v
-	}
-	return detailsMap, nil
-}
diff --git a/client/parse_logs_test.go b/client/parse_logs_test.go
deleted file mode 100644
index ac7f616..0000000
--- a/client/parse_logs_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package client
-
-import (
-	"reflect"
-	"testing"
-
-	"github.com/pkg/errors"
-)
-
-func TestParseLogDetails(t *testing.T) {
-	testCases := []struct {
-		line     string
-		expected map[string]string
-		err      error
-	}{
-		{"key=value", map[string]string{"key": "value"}, nil},
-		{"key1=value1,key2=value2", map[string]string{"key1": "value1", "key2": "value2"}, nil},
-		{"key+with+spaces=value%3Dequals,asdf%2C=", map[string]string{"key with spaces": "value=equals", "asdf,": ""}, nil},
-		{"key=,=nothing", map[string]string{"key": "", "": "nothing"}, nil},
-		{"=", map[string]string{"": ""}, nil},
-		{"errors", nil, errors.New("invalid details format")},
-	}
-	for _, tc := range testCases {
-		tc := tc // capture range variable
-		t.Run(tc.line, func(t *testing.T) {
-			t.Parallel()
-			res, err := ParseLogDetails(tc.line)
-			if err != nil && (err.Error() != tc.err.Error()) {
-				t.Fatalf("unexpected error parsing logs:\nExpected:\n\t%v\nActual:\n\t%v", tc.err, err)
-			}
-			if !reflect.DeepEqual(tc.expected, res) {
-				t.Errorf("result does not match expected:\nExpected:\n\t%#v\nActual:\n\t%#v", tc.expected, res)
-			}
-		})
-	}
-}
diff --git a/client/ping.go b/client/ping.go
index 8501375..0b6e450 100644
--- a/client/ping.go
+++ b/client/ping.go
@@ -28,7 +28,5 @@
 		}
 		ping.OSType = serverResp.header.Get("OSType")
 	}
-
-	err = cli.checkResponseErr(serverResp)
-	return ping, err
+	return ping, cli.checkResponseErr(serverResp)
 }
diff --git a/client/plugin_inspect.go b/client/plugin_inspect.go
index 89f39ee..6a6fc18 100644
--- a/client/plugin_inspect.go
+++ b/client/plugin_inspect.go
@@ -4,7 +4,6 @@
 	"bytes"
 	"encoding/json"
 	"io/ioutil"
-	"net/http"
 
 	"github.com/docker/docker/api/types"
 	"golang.org/x/net/context"
@@ -14,10 +13,7 @@
 func (cli *Client) PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) {
 	resp, err := cli.get(ctx, "/plugins/"+name+"/json", nil, nil)
 	if err != nil {
-		if resp.statusCode == http.StatusNotFound {
-			return nil, nil, pluginNotFoundError{name}
-		}
-		return nil, nil, err
+		return nil, nil, wrapResponseError(err, resp, "plugin", name)
 	}
 
 	defer ensureReaderClosed(resp)
diff --git a/client/plugin_list.go b/client/plugin_list.go
index 3acde3b..78dbeb8 100644
--- a/client/plugin_list.go
+++ b/client/plugin_list.go
@@ -23,7 +23,7 @@
 	}
 	resp, err := cli.get(ctx, "/plugins", query, nil)
 	if err != nil {
-		return plugins, err
+		return plugins, wrapResponseError(err, resp, "plugin", "")
 	}
 
 	err = json.NewDecoder(resp.body).Decode(&plugins)
diff --git a/client/plugin_remove.go b/client/plugin_remove.go
index b017e4d..b498c48 100644
--- a/client/plugin_remove.go
+++ b/client/plugin_remove.go
@@ -16,5 +16,5 @@
 
 	resp, err := cli.delete(ctx, "/plugins/"+name, query, nil)
 	ensureReaderClosed(resp)
-	return err
+	return wrapResponseError(err, resp, "plugin", name)
 }
diff --git a/client/request.go b/client/request.go
index 3e7d43f..615d0b9 100644
--- a/client/request.go
+++ b/client/request.go
@@ -203,7 +203,7 @@
 		return err
 	}
 	if len(body) == 0 {
-		return fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
+		return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), serverResp.reqURL)
 	}
 
 	var ct string
diff --git a/client/secret_inspect.go b/client/secret_inspect.go
index 9b60297..6927ea9 100644
--- a/client/secret_inspect.go
+++ b/client/secret_inspect.go
@@ -4,7 +4,6 @@
 	"bytes"
 	"encoding/json"
 	"io/ioutil"
-	"net/http"
 
 	"github.com/docker/docker/api/types/swarm"
 	"golang.org/x/net/context"
@@ -17,10 +16,7 @@
 	}
 	resp, err := cli.get(ctx, "/secrets/"+id, nil, nil)
 	if err != nil {
-		if resp.statusCode == http.StatusNotFound {
-			return swarm.Secret{}, nil, secretNotFoundError{id}
-		}
-		return swarm.Secret{}, nil, err
+		return swarm.Secret{}, nil, wrapResponseError(err, resp, "secret", id)
 	}
 	defer ensureReaderClosed(resp)
 
diff --git a/client/secret_inspect_test.go b/client/secret_inspect_test.go
index 1581da1..66dbb48 100644
--- a/client/secret_inspect_test.go
+++ b/client/secret_inspect_test.go
@@ -42,7 +42,7 @@
 	}
 
 	_, _, err := client.SecretInspectWithRaw(context.Background(), "unknown")
-	if err == nil || !IsErrSecretNotFound(err) {
+	if err == nil || !IsErrNotFound(err) {
 		t.Fatalf("expected a secretNotFoundError error, got %v", err)
 	}
 }
diff --git a/client/secret_list.go b/client/secret_list.go
index 0d33ecf..fdee6e2 100644
--- a/client/secret_list.go
+++ b/client/secret_list.go
@@ -18,7 +18,7 @@
 	query := url.Values{}
 
 	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToParam(options.Filters)
+		filterJSON, err := filters.ToJSON(options.Filters)
 		if err != nil {
 			return nil, err
 		}
diff --git a/client/secret_remove.go b/client/secret_remove.go
index c5e37af..9b4ee71 100644
--- a/client/secret_remove.go
+++ b/client/secret_remove.go
@@ -9,5 +9,5 @@
 	}
 	resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil)
 	ensureReaderClosed(resp)
-	return err
+	return wrapResponseError(err, resp, "secret", id)
 }
diff --git a/client/service_create.go b/client/service_create.go
index 6b9364d..834709d 100644
--- a/client/service_create.go
+++ b/client/service_create.go
@@ -3,6 +3,7 @@
 import (
 	"encoding/json"
 	"fmt"
+	"strings"
 
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
@@ -87,19 +88,28 @@
 
 func imageDigestAndPlatforms(ctx context.Context, cli DistributionAPIClient, image, encodedAuth string) (string, []swarm.Platform, error) {
 	distributionInspect, err := cli.DistributionInspect(ctx, image, encodedAuth)
-	imageWithDigest := image
 	var platforms []swarm.Platform
 	if err != nil {
 		return "", nil, err
 	}
 
-	imageWithDigest = imageWithDigestString(image, distributionInspect.Descriptor.Digest)
+	imageWithDigest := imageWithDigestString(image, distributionInspect.Descriptor.Digest)
 
 	if len(distributionInspect.Platforms) > 0 {
 		platforms = make([]swarm.Platform, 0, len(distributionInspect.Platforms))
 		for _, p := range distributionInspect.Platforms {
+			// clear architecture field for arm. This is a temporary patch to address
+			// https://github.com/docker/swarmkit/issues/2294. The issue is that while
+			// image manifests report "arm" as the architecture, the node reports
+			// something like "armv7l" (includes the variant), which causes arm images
+			// to stop working with swarm mode. This patch removes the architecture
+			// constraint for arm images to ensure tasks get scheduled.
+			arch := p.Architecture
+			if strings.ToLower(arch) == "arm" {
+				arch = ""
+			}
 			platforms = append(platforms, swarm.Platform{
-				Architecture: p.Architecture,
+				Architecture: arch,
 				OS:           p.OS,
 			})
 		}
diff --git a/client/service_inspect.go b/client/service_inspect.go
index d7e051e..3e9699e 100644
--- a/client/service_inspect.go
+++ b/client/service_inspect.go
@@ -5,7 +5,6 @@
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
-	"net/http"
 	"net/url"
 
 	"github.com/docker/docker/api/types"
@@ -19,10 +18,7 @@
 	query.Set("insertDefaults", fmt.Sprintf("%v", opts.InsertDefaults))
 	serverResp, err := cli.get(ctx, "/services/"+serviceID, query, nil)
 	if err != nil {
-		if serverResp.statusCode == http.StatusNotFound {
-			return swarm.Service{}, nil, serviceNotFoundError{serviceID}
-		}
-		return swarm.Service{}, nil, err
+		return swarm.Service{}, nil, wrapResponseError(err, serverResp, "service", serviceID)
 	}
 	defer ensureReaderClosed(serverResp)
 
diff --git a/client/service_inspect_test.go b/client/service_inspect_test.go
index d53f583..ade6253 100644
--- a/client/service_inspect_test.go
+++ b/client/service_inspect_test.go
@@ -31,7 +31,7 @@
 	}
 
 	_, _, err := client.ServiceInspectWithRaw(context.Background(), "unknown", types.ServiceInspectOptions{})
-	if err == nil || !IsErrServiceNotFound(err) {
+	if err == nil || !IsErrNotFound(err) {
 		t.Fatalf("expected a serviceNotFoundError error, got %v", err)
 	}
 }
diff --git a/client/service_list.go b/client/service_list.go
index c29e6d4..eb3ff97 100644
--- a/client/service_list.go
+++ b/client/service_list.go
@@ -15,7 +15,7 @@
 	query := url.Values{}
 
 	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToParam(options.Filters)
+		filterJSON, err := filters.ToJSON(options.Filters)
 		if err != nil {
 			return nil, err
 		}
diff --git a/client/service_remove.go b/client/service_remove.go
index a9331f9..ad992c0 100644
--- a/client/service_remove.go
+++ b/client/service_remove.go
@@ -6,5 +6,5 @@
 func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error {
 	resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil)
 	ensureReaderClosed(resp)
-	return err
+	return wrapResponseError(err, resp, "service", serviceID)
 }
diff --git a/client/service_remove_test.go b/client/service_remove_test.go
index 8e2ac25..d7c51ef 100644
--- a/client/service_remove_test.go
+++ b/client/service_remove_test.go
@@ -8,6 +8,7 @@
 	"strings"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
 	"golang.org/x/net/context"
 )
 
@@ -17,9 +18,17 @@
 	}
 
 	err := client.ServiceRemove(context.Background(), "service_id")
-	if err == nil || err.Error() != "Error response from daemon: Server error" {
-		t.Fatalf("expected a Server Error, got %v", err)
+	assert.EqualError(t, err, "Error response from daemon: Server error")
+}
+
+func TestServiceRemoveNotFoundError(t *testing.T) {
+	client := &Client{
+		client: newMockClient(errorMock(http.StatusNotFound, "missing")),
 	}
+
+	err := client.ServiceRemove(context.Background(), "service_id")
+	assert.EqualError(t, err, "Error: No such service: service_id")
+	assert.True(t, IsErrNotFound(err))
 }
 
 func TestServiceRemove(t *testing.T) {
diff --git a/client/task_inspect.go b/client/task_inspect.go
index bc8058f..dc08ced 100644
--- a/client/task_inspect.go
+++ b/client/task_inspect.go
@@ -4,10 +4,8 @@
 	"bytes"
 	"encoding/json"
 	"io/ioutil"
-	"net/http"
 
 	"github.com/docker/docker/api/types/swarm"
-
 	"golang.org/x/net/context"
 )
 
@@ -15,10 +13,7 @@
 func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
 	serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
 	if err != nil {
-		if serverResp.statusCode == http.StatusNotFound {
-			return swarm.Task{}, nil, taskNotFoundError{taskID}
-		}
-		return swarm.Task{}, nil, err
+		return swarm.Task{}, nil, wrapResponseError(err, serverResp, "task", taskID)
 	}
 	defer ensureReaderClosed(serverResp)
 
diff --git a/client/task_list.go b/client/task_list.go
index 66324da..01bd695 100644
--- a/client/task_list.go
+++ b/client/task_list.go
@@ -15,7 +15,7 @@
 	query := url.Values{}
 
 	if options.Filters.Len() > 0 {
-		filterJSON, err := filters.ToParam(options.Filters)
+		filterJSON, err := filters.ToJSON(options.Filters)
 		if err != nil {
 			return nil, err
 		}
diff --git a/client/tlsconfig_clone.go b/client/tlsconfig_clone.go
new file mode 100644
index 0000000..99b6be1
--- /dev/null
+++ b/client/tlsconfig_clone.go
@@ -0,0 +1,11 @@
+// +build go1.8
+
+package client
+
+import "crypto/tls"
+
+// tlsConfigClone returns a clone of tls.Config. This function is provided for
+// compatibility with go1.7, which doesn't include this method in the stdlib.
+func tlsConfigClone(c *tls.Config) *tls.Config {
+	return c.Clone()
+}
diff --git a/client/tlsconfig_clone_go17.go b/client/tlsconfig_clone_go17.go
new file mode 100644
index 0000000..b837b2a
--- /dev/null
+++ b/client/tlsconfig_clone_go17.go
@@ -0,0 +1,33 @@
+// +build go1.7,!go1.8
+
+package client
+
+import "crypto/tls"
+
+// tlsConfigClone returns a clone of tls.Config. This function is provided for
+// compatibility with go1.7, which doesn't include this method in the stdlib.
+func tlsConfigClone(c *tls.Config) *tls.Config {
+	return &tls.Config{
+		Rand:                        c.Rand,
+		Time:                        c.Time,
+		Certificates:                c.Certificates,
+		NameToCertificate:           c.NameToCertificate,
+		GetCertificate:              c.GetCertificate,
+		RootCAs:                     c.RootCAs,
+		NextProtos:                  c.NextProtos,
+		ServerName:                  c.ServerName,
+		ClientAuth:                  c.ClientAuth,
+		ClientCAs:                   c.ClientCAs,
+		InsecureSkipVerify:          c.InsecureSkipVerify,
+		CipherSuites:                c.CipherSuites,
+		PreferServerCipherSuites:    c.PreferServerCipherSuites,
+		SessionTicketsDisabled:      c.SessionTicketsDisabled,
+		SessionTicketKey:            c.SessionTicketKey,
+		ClientSessionCache:          c.ClientSessionCache,
+		MinVersion:                  c.MinVersion,
+		MaxVersion:                  c.MaxVersion,
+		CurvePreferences:            c.CurvePreferences,
+		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+		Renegotiation:               c.Renegotiation,
+	}
+}
diff --git a/client/utils.go b/client/utils.go
index f3d8877..1377050 100644
--- a/client/utils.go
+++ b/client/utils.go
@@ -24,7 +24,7 @@
 func getFiltersQuery(f filters.Args) (url.Values, error) {
 	query := url.Values{}
 	if f.Len() > 0 {
-		filterJSON, err := filters.ToParam(f)
+		filterJSON, err := filters.ToJSON(f)
 		if err != nil {
 			return query, err
 		}
diff --git a/client/volume_inspect.go b/client/volume_inspect.go
index 3860e9b..9889343 100644
--- a/client/volume_inspect.go
+++ b/client/volume_inspect.go
@@ -4,7 +4,7 @@
 	"bytes"
 	"encoding/json"
 	"io/ioutil"
-	"net/http"
+	"path"
 
 	"github.com/docker/docker/api/types"
 	"golang.org/x/net/context"
@@ -18,13 +18,17 @@
 
 // VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation
 func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) {
+	// The empty ID needs to be handled here because with an empty ID the
+	// request URL will not contain a trailing /, which would call the volume
+	// list API instead of the volume inspect API.
+	if volumeID == "" {
+		return types.Volume{}, nil, objectNotFoundError{object: "volume", id: volumeID}
+	}
+
 	var volume types.Volume
-	resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil)
+	resp, err := cli.get(ctx, path.Join("/volumes", volumeID), nil, nil)
 	if err != nil {
-		if resp.statusCode == http.StatusNotFound {
-			return volume, nil, volumeNotFoundError{volumeID}
-		}
-		return volume, nil, err
+		return volume, nil, wrapResponseError(err, resp, "volume", volumeID)
 	}
 	defer ensureReaderClosed(resp)
 
diff --git a/client/volume_inspect_test.go b/client/volume_inspect_test.go
index 0d1d118..7d01f44 100644
--- a/client/volume_inspect_test.go
+++ b/client/volume_inspect_test.go
@@ -10,6 +10,9 @@
 	"testing"
 
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/internal/testutil"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"golang.org/x/net/context"
 )
 
@@ -19,9 +22,7 @@
 	}
 
 	_, err := client.VolumeInspect(context.Background(), "nothing")
-	if err == nil || err.Error() != "Error response from daemon: Server error" {
-		t.Fatalf("expected a Server Error, got %v", err)
-	}
+	testutil.ErrorContains(t, err, "Error response from daemon: Server error")
 }
 
 func TestVolumeInspectNotFound(t *testing.T) {
@@ -30,13 +31,34 @@
 	}
 
 	_, err := client.VolumeInspect(context.Background(), "unknown")
-	if err == nil || !IsErrVolumeNotFound(err) {
-		t.Fatalf("expected a volumeNotFound error, got %v", err)
+	assert.True(t, IsErrNotFound(err))
+}
+
+func TestVolumeInspectWithEmptyID(t *testing.T) {
+	expectedURL := "/volumes/"
+
+	client := &Client{
+		client: newMockClient(func(req *http.Request) (*http.Response, error) {
+			assert.Equal(t, req.URL.Path, expectedURL)
+			return &http.Response{
+				StatusCode: http.StatusNotFound,
+				Body:       ioutil.NopCloser(bytes.NewReader(nil)),
+			}, nil
+		}),
 	}
+	_, err := client.VolumeInspect(context.Background(), "")
+	testutil.ErrorContains(t, err, "No such volume: ")
+
 }
 
 func TestVolumeInspect(t *testing.T) {
 	expectedURL := "/volumes/volume_id"
+	expected := types.Volume{
+		Name:       "name",
+		Driver:     "driver",
+		Mountpoint: "mountpoint",
+	}
+
 	client := &Client{
 		client: newMockClient(func(req *http.Request) (*http.Response, error) {
 			if !strings.HasPrefix(req.URL.Path, expectedURL) {
@@ -45,11 +67,7 @@
 			if req.Method != "GET" {
 				return nil, fmt.Errorf("expected GET method, got %s", req.Method)
 			}
-			content, err := json.Marshal(types.Volume{
-				Name:       "name",
-				Driver:     "driver",
-				Mountpoint: "mountpoint",
-			})
+			content, err := json.Marshal(expected)
 			if err != nil {
 				return nil, err
 			}
@@ -60,17 +78,7 @@
 		}),
 	}
 
-	v, err := client.VolumeInspect(context.Background(), "volume_id")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if v.Name != "name" {
-		t.Fatalf("expected `name`, got %s", v.Name)
-	}
-	if v.Driver != "driver" {
-		t.Fatalf("expected `driver`, got %s", v.Driver)
-	}
-	if v.Mountpoint != "mountpoint" {
-		t.Fatalf("expected `mountpoint`, got %s", v.Mountpoint)
-	}
+	volume, err := client.VolumeInspect(context.Background(), "volume_id")
+	require.NoError(t, err)
+	assert.Equal(t, expected, volume)
 }
diff --git a/client/volume_remove.go b/client/volume_remove.go
index 6c26575..3ffb8bc 100644
--- a/client/volume_remove.go
+++ b/client/volume_remove.go
@@ -17,5 +17,5 @@
 	}
 	resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil)
 	ensureReaderClosed(resp)
-	return err
+	return wrapResponseError(err, resp, "volume", volumeID)
 }
diff --git a/cmd/dockerd/config.go b/cmd/dockerd/config.go
index f80641b..b9d586a 100644
--- a/cmd/dockerd/config.go
+++ b/cmd/dockerd/config.go
@@ -27,6 +27,8 @@
 	flags.Var(opts.NewNamedListOptsRef("exec-opts", &conf.ExecOptions, nil), "exec-opt", "Runtime execution options")
 	flags.StringVarP(&conf.Pidfile, "pidfile", "p", defaultPidFile, "Path to use for daemon PID file")
 	flags.StringVarP(&conf.Root, "graph", "g", defaultDataRoot, "Root of the Docker runtime")
+	flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files")
+	flags.StringVar(&conf.ContainerdAddr, "containerd", "", "containerd grpc address")
 
 	// "--graph" is "soft-deprecated" in favor of "data-root". This flag was added
 	// before Docker 1.0, so won't be removed, only hidden, to discourage its usage.
@@ -63,7 +65,8 @@
 
 	flags.StringVar(&conf.MetricsAddress, "metrics-addr", "", "Set default address and port to serve the metrics api on")
 
-	flags.StringVar(&conf.NodeGenericResources, "node-generic-resources", "", "user defined resources (e.g. fpga=2;gpu={UUID1,UUID2,UUID3})")
+	flags.Var(opts.NewListOptsRef(&conf.NodeGenericResources, opts.ValidateSingleGenericResource), "node-generic-resource", "Advertise user-defined resource")
+
 	flags.IntVar(&conf.NetworkControlPlaneMTU, "network-control-plane-mtu", config.DefaultNetworkMtu, "Network Control plane MTU")
 
 	// "--deprecated-key-path" is to allow configuration of the key used
diff --git a/cmd/dockerd/config_common_unix.go b/cmd/dockerd/config_common_unix.go
index b29307b..febf30a 100644
--- a/cmd/dockerd/config_common_unix.go
+++ b/cmd/dockerd/config_common_unix.go
@@ -1,4 +1,4 @@
-// +build solaris linux freebsd
+// +build linux freebsd
 
 package main
 
diff --git a/cmd/dockerd/config_unix.go b/cmd/dockerd/config_unix.go
index 148fa87..b3bd741 100644
--- a/cmd/dockerd/config_unix.go
+++ b/cmd/dockerd/config_unix.go
@@ -1,4 +1,4 @@
-// +build linux,!solaris freebsd,!solaris
+// +build linux freebsd
 
 package main
 
@@ -29,15 +29,11 @@
 	flags.BoolVar(&conf.BridgeConfig.EnableIPForward, "ip-forward", true, "Enable net.ipv4.ip_forward")
 	flags.BoolVar(&conf.BridgeConfig.EnableIPMasq, "ip-masq", true, "Enable IP masquerading")
 	flags.BoolVar(&conf.BridgeConfig.EnableIPv6, "ipv6", false, "Enable IPv6 networking")
-	flags.StringVar(&conf.ExecRoot, "exec-root", defaultExecRoot, "Root directory for execution state files")
 	flags.StringVar(&conf.BridgeConfig.FixedCIDRv6, "fixed-cidr-v6", "", "IPv6 subnet for fixed IPs")
 	flags.BoolVar(&conf.BridgeConfig.EnableUserlandProxy, "userland-proxy", true, "Use userland proxy for loopback traffic")
 	flags.StringVar(&conf.BridgeConfig.UserlandProxyPath, "userland-proxy-path", "", "Path to the userland proxy binary")
-	flags.BoolVar(&conf.EnableCors, "api-enable-cors", false, "Enable CORS headers in the Engine API, this is deprecated by --api-cors-header")
-	flags.MarkDeprecated("api-enable-cors", "Please use --api-cors-header")
 	flags.StringVar(&conf.CgroupParent, "cgroup-parent", "", "Set parent cgroup for all containers")
 	flags.StringVar(&conf.RemappedRoot, "userns-remap", "", "User/Group setting for user namespaces")
-	flags.StringVar(&conf.ContainerdAddr, "containerd", "", "Path to containerd socket")
 	flags.BoolVar(&conf.LiveRestoreEnabled, "live-restore", false, "Enable live restore of docker when containers are still running")
 	flags.IntVar(&conf.OOMScoreAdjust, "oom-score-adjust", -500, "Set the oom_score_adj for the daemon")
 	flags.BoolVar(&conf.Init, "init", false, "Run an init in the container to forward signals and reap processes")
diff --git a/cmd/dockerd/config_unix_test.go b/cmd/dockerd/config_unix_test.go
index 99b2f90..2705d67 100644
--- a/cmd/dockerd/config_unix_test.go
+++ b/cmd/dockerd/config_unix_test.go
@@ -1,9 +1,8 @@
-// +build linux,!solaris freebsd,!solaris
+// +build linux freebsd
 
 package main
 
 import (
-	"runtime"
 	"testing"
 
 	"github.com/docker/docker/daemon/config"
@@ -12,9 +11,6 @@
 )
 
 func TestDaemonParseShmSize(t *testing.T) {
-	if runtime.GOOS == "solaris" {
-		t.Skip("ShmSize not supported on Solaris\n")
-	}
 	flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
 
 	conf := &config.Config{}
diff --git a/cmd/dockerd/config_windows.go b/cmd/dockerd/config_windows.go
index 79cdd25..36af766 100644
--- a/cmd/dockerd/config_windows.go
+++ b/cmd/dockerd/config_windows.go
@@ -11,6 +11,7 @@
 var (
 	defaultPidFile  string
 	defaultDataRoot = filepath.Join(os.Getenv("programdata"), "docker")
+	defaultExecRoot = filepath.Join(os.Getenv("programdata"), "docker", "exec-root")
 )
 
 // installConfigFlags adds flags to the pflag.FlagSet to configure the daemon
diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go
index c2f3781..44e1667 100644
--- a/cmd/dockerd/daemon.go
+++ b/cmd/dockerd/daemon.go
@@ -38,7 +38,7 @@
 	"github.com/docker/docker/libcontainerd"
 	dopts "github.com/docker/docker/opts"
 	"github.com/docker/docker/pkg/authorization"
-	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/docker/docker/pkg/pidfile"
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/signal"
@@ -94,11 +94,13 @@
 	}
 
 	logrus.SetFormatter(&logrus.TextFormatter{
-		TimestampFormat: jsonlog.RFC3339NanoFixed,
+		TimestampFormat: jsonmessage.RFC3339NanoFixed,
 		DisableColors:   cli.Config.RawLogs,
 		FullTimestamp:   true,
 	})
 
+	system.InitLCOW(cli.Config.Experimental)
+
 	if err := setDefaultUmask(); err != nil {
 		return fmt.Errorf("Failed to set umask: %v", err)
 	}
@@ -132,7 +134,6 @@
 		Logging:     true,
 		SocketGroup: cli.Config.SocketGroup,
 		Version:     dockerversion.Version,
-		EnableCors:  cli.Config.EnableCors,
 		CorsHeaders: cli.Config.CorsHeaders,
 	}
 
@@ -198,8 +199,16 @@
 		cli.api.Accept(addr, ls...)
 	}
 
-	registryService := registry.NewService(cli.Config.ServiceOptions)
-	containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...)
+	registryService, err := registry.NewService(cli.Config.ServiceOptions)
+	if err != nil {
+		return err
+	}
+
+	rOpts, err := cli.getRemoteOptions()
+	if err != nil {
+		return fmt.Errorf("Failed to generate containerd options: %s", err)
+	}
+	containerdRemote, err := libcontainerd.New(filepath.Join(cli.Config.Root, "containerd"), filepath.Join(cli.Config.ExecRoot, "containerd"), rOpts...)
 	if err != nil {
 		return err
 	}
@@ -217,10 +226,6 @@
 		logrus.Fatalf("Error creating middlewares: %v", err)
 	}
 
-	if system.LCOWSupported() {
-		logrus.Warnln("LCOW support is enabled - this feature is incomplete")
-	}
-
 	d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote, pluginStore)
 	if err != nil {
 		return fmt.Errorf("Error starting daemon: %v", err)
@@ -467,7 +472,7 @@
 		return nil, err
 	}
 
-	if conf.V2Only == false {
+	if !conf.V2Only {
 		logrus.Warnf(`The "disable-legacy-registry" option is deprecated and wil be removed in Docker v17.12. Interacting with legacy (v1) registries will no longer be supported in Docker v17.12"`)
 	}
 
@@ -548,7 +553,7 @@
 	vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion)
 	s.UseMiddleware(vm)
 
-	if cfg.EnableCors || cfg.CorsHeaders != "" {
+	if cfg.CorsHeaders != "" {
 		c := middleware.NewCORSMiddleware(cfg.CorsHeaders)
 		s.UseMiddleware(c)
 	}
@@ -559,6 +564,17 @@
 	return nil
 }
 
+func (cli *DaemonCli) getRemoteOptions() ([]libcontainerd.RemoteOption, error) {
+	opts := []libcontainerd.RemoteOption{}
+
+	pOpts, err := cli.getPlatformRemoteOptions()
+	if err != nil {
+		return nil, err
+	}
+	opts = append(opts, pOpts...)
+	return opts, nil
+}
+
 // validates that the plugins requested with the --authorization-plugin flag are valid AuthzDriver
 // plugins present on the host and available to the daemon
 func validateAuthzPlugins(requestedPlugins []string, pg plugingetter.PluginGetter) error {
diff --git a/cmd/dockerd/daemon_linux.go b/cmd/dockerd/daemon_linux.go
index a909ee4..b58f0f0 100644
--- a/cmd/dockerd/daemon_linux.go
+++ b/cmd/dockerd/daemon_linux.go
@@ -11,5 +11,5 @@
 // notifySystem sends a message to the host when the server is ready to be used
 func notifySystem() {
 	// Tell the init daemon we are accepting requests
-	go systemdDaemon.SdNotify("READY=1")
+	go systemdDaemon.SdNotify(false, "READY=1")
 }
diff --git a/cmd/dockerd/daemon_solaris.go b/cmd/dockerd/daemon_solaris.go
deleted file mode 100644
index 9ee18da..0000000
--- a/cmd/dockerd/daemon_solaris.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// +build solaris
-
-package main
-
-import (
-	"fmt"
-	"net"
-	"path/filepath"
-
-	"github.com/docker/docker/libcontainerd"
-	"golang.org/x/sys/unix"
-)
-
-const defaultDaemonConfigFile = ""
-
-// setDefaultUmask sets the umask to 0022 to avoid problems
-// caused by custom umask
-func setDefaultUmask() error {
-	desiredUmask := 0022
-	unix.Umask(desiredUmask)
-	if umask := unix.Umask(desiredUmask); umask != desiredUmask {
-		return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask)
-	}
-
-	return nil
-}
-
-func getDaemonConfDir(_ string) string {
-	return "/etc/docker"
-}
-
-// setupConfigReloadTrap configures the USR2 signal to reload the configuration.
-func (cli *DaemonCli) setupConfigReloadTrap() {
-}
-
-// preNotifySystem sends a message to the host when the API is active, but before the daemon is
-func preNotifySystem() {
-}
-
-// notifySystem sends a message to the host when the server is ready to be used
-func notifySystem() {
-}
-
-func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption {
-	opts := []libcontainerd.RemoteOption{}
-	if cli.Config.ContainerdAddr != "" {
-		opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr))
-	} else {
-		opts = append(opts, libcontainerd.WithStartDaemon(true))
-	}
-	return opts
-}
-
-// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to
-// store their state.
-func (cli *DaemonCli) getLibcontainerdRoot() string {
-	return filepath.Join(cli.Config.ExecRoot, "libcontainerd")
-}
-
-// getSwarmRunRoot gets the root directory for swarm to store runtime state
-// For example, the control socket
-func (cli *DaemonCli) getSwarmRunRoot() string {
-	return filepath.Join(cli.Config.ExecRoot, "swarm")
-}
-
-func allocateDaemonPort(addr string) error {
-	return nil
-}
-
-// notifyShutdown is called after the daemon shuts down but before the process exits.
-func notifyShutdown(err error) {
-}
-
-func wrapListeners(proto string, ls []net.Listener) []net.Listener {
-	return ls
-}
diff --git a/cmd/dockerd/daemon_unix.go b/cmd/dockerd/daemon_unix.go
index 7909d98d..41e6b61 100644
--- a/cmd/dockerd/daemon_unix.go
+++ b/cmd/dockerd/daemon_unix.go
@@ -1,4 +1,4 @@
-// +build !windows,!solaris
+// +build !windows
 
 package main
 
@@ -10,9 +10,11 @@
 	"path/filepath"
 	"strconv"
 
+	"github.com/containerd/containerd/linux"
 	"github.com/docker/docker/cmd/dockerd/hack"
 	"github.com/docker/docker/daemon"
 	"github.com/docker/docker/libcontainerd"
+	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/libnetwork/portallocator"
 	"golang.org/x/sys/unix"
 )
@@ -35,6 +37,39 @@
 	return "/etc/docker"
 }
 
+func (cli *DaemonCli) getPlatformRemoteOptions() ([]libcontainerd.RemoteOption, error) {
+	// On older kernels, putting the containerd-shim in its own mount
+	// namespace will effectively prevent operations such as unlink, rename
+	// and remove on mountpoints that were present at the time the shim
+	// namespace was created. This would lead to the famous EBUSY error when
+	// trying to remove shm mounts.
+	var noNewNS bool
+	if !kernel.CheckKernelVersion(3, 18, 0) {
+		noNewNS = true
+	}
+
+	opts := []libcontainerd.RemoteOption{
+		libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust),
+		libcontainerd.WithPlugin("linux", &linux.Config{
+			Shim:          daemon.DefaultShimBinary,
+			Runtime:       daemon.DefaultRuntimeBinary,
+			RuntimeRoot:   filepath.Join(cli.Config.Root, "runc"),
+			ShimDebug:     cli.Config.Debug,
+			ShimNoMountNS: noNewNS,
+		}),
+	}
+	if cli.Config.Debug {
+		opts = append(opts, libcontainerd.WithLogLevel("debug"))
+	}
+	if cli.Config.ContainerdAddr != "" {
+		opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr))
+	} else {
+		opts = append(opts, libcontainerd.WithStartDaemon(true))
+	}
+
+	return opts, nil
+}
+
 // setupConfigReloadTrap configures the USR2 signal to reload the configuration.
 func (cli *DaemonCli) setupConfigReloadTrap() {
 	c := make(chan os.Signal, 1)
@@ -46,33 +81,6 @@
 	}()
 }
 
-func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption {
-	opts := []libcontainerd.RemoteOption{
-		libcontainerd.WithDebugLog(cli.Config.Debug),
-		libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust),
-	}
-	if cli.Config.ContainerdAddr != "" {
-		opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr))
-	} else {
-		opts = append(opts, libcontainerd.WithStartDaemon(true))
-	}
-	if daemon.UsingSystemd(cli.Config) {
-		args := []string{"--systemd-cgroup=true"}
-		opts = append(opts, libcontainerd.WithRuntimeArgs(args))
-	}
-	if cli.Config.LiveRestoreEnabled {
-		opts = append(opts, libcontainerd.WithLiveRestore(true))
-	}
-	opts = append(opts, libcontainerd.WithRuntimePath(daemon.DefaultRuntimeBinary))
-	return opts
-}
-
-// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to
-// store their state.
-func (cli *DaemonCli) getLibcontainerdRoot() string {
-	return filepath.Join(cli.Config.ExecRoot, "libcontainerd")
-}
-
 // getSwarmRunRoot gets the root directory for swarm to store runtime state
 // For example, the control socket
 func (cli *DaemonCli) getSwarmRunRoot() string {
diff --git a/cmd/dockerd/daemon_unix_test.go b/cmd/dockerd/daemon_unix_test.go
index 5d99e51..475ff9e 100644
--- a/cmd/dockerd/daemon_unix_test.go
+++ b/cmd/dockerd/daemon_unix_test.go
@@ -1,7 +1,4 @@
-// +build !windows,!solaris
-
-// TODO: Create new file for Solaris which tests config parameters
-// as described in daemon/config_solaris.go
+// +build !windows
 
 package main
 
diff --git a/cmd/dockerd/daemon_windows.go b/cmd/dockerd/daemon_windows.go
index 77bade2..224c509 100644
--- a/cmd/dockerd/daemon_windows.go
+++ b/cmd/dockerd/daemon_windows.go
@@ -48,15 +48,20 @@
 	}
 }
 
+func (cli *DaemonCli) getPlatformRemoteOptions() ([]libcontainerd.RemoteOption, error) {
+	return nil, nil
+}
+
 // setupConfigReloadTrap configures a Win32 event to reload the configuration.
 func (cli *DaemonCli) setupConfigReloadTrap() {
 	go func() {
 		sa := windows.SecurityAttributes{
 			Length: 0,
 		}
-		ev, _ := windows.UTF16PtrFromString("Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()))
+		event := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid())
+		ev, _ := windows.UTF16PtrFromString(event)
 		if h, _ := windows.CreateEvent(&sa, 0, 0, ev); h != 0 {
-			logrus.Debugf("Config reload - waiting signal at %s", ev)
+			logrus.Debugf("Config reload - waiting signal at %s", event)
 			for {
 				windows.WaitForSingleObject(h, windows.INFINITE)
 				cli.reloadConfig()
@@ -65,17 +70,6 @@
 	}()
 }
 
-func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption {
-	return nil
-}
-
-// getLibcontainerdRoot gets the root directory for libcontainerd to store its
-// state. The Windows libcontainerd implementation does not need to write a spec
-// or state to disk, so this is a no-op.
-func (cli *DaemonCli) getLibcontainerdRoot() string {
-	return ""
-}
-
 // getSwarmRunRoot gets the root directory for swarm to store runtime state
 // For example, the control socket
 func (cli *DaemonCli) getSwarmRunRoot() string {
diff --git a/container/archive.go b/container/archive.go
index 56e6598..ec4c236 100644
--- a/container/archive.go
+++ b/container/archive.go
@@ -2,7 +2,6 @@
 
 import (
 	"os"
-	"path/filepath"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/pkg/archive"
@@ -15,17 +14,20 @@
 // an error if the path points to outside the container's rootfs.
 func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
 	// Check if a drive letter supplied, it must be the system drive. No-op except on Windows
-	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
+	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, container.BaseFS)
 	if err != nil {
 		return "", "", err
 	}
 
 	// Consider the given path as an absolute path in the container.
-	absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
+	absPath = archive.PreserveTrailingDotOrSeparator(
+		container.BaseFS.Join(string(container.BaseFS.Separator()), path),
+		path,
+		container.BaseFS.Separator())
 
 	// Split the absPath into its Directory and Base components. We will
 	// resolve the dir in the scope of the container then append the base.
-	dirPath, basePath := filepath.Split(absPath)
+	dirPath, basePath := container.BaseFS.Split(absPath)
 
 	resolvedDirPath, err := container.GetResourcePath(dirPath)
 	if err != nil {
@@ -34,8 +36,7 @@
 
 	// resolvedDirPath will have been cleaned (no trailing path separators) so
 	// we can manually join it with the base path element.
-	resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
-
+	resolvedPath = resolvedDirPath + string(container.BaseFS.Separator()) + basePath
 	return resolvedPath, absPath, nil
 }
 
@@ -44,7 +45,9 @@
 // resolved to a path on the host corresponding to the given absolute path
 // inside the container.
 func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
-	lstat, err := os.Lstat(resolvedPath)
+	driver := container.BaseFS
+
+	lstat, err := driver.Lstat(resolvedPath)
 	if err != nil {
 		return nil, err
 	}
@@ -57,17 +60,17 @@
 			return nil, err
 		}
 
-		linkTarget, err = filepath.Rel(container.BaseFS, hostPath)
+		linkTarget, err = driver.Rel(driver.Path(), hostPath)
 		if err != nil {
 			return nil, err
 		}
 
 		// Make it an absolute path.
-		linkTarget = filepath.Join(string(filepath.Separator), linkTarget)
+		linkTarget = driver.Join(string(driver.Separator()), linkTarget)
 	}
 
 	return &types.ContainerPathStat{
-		Name:       filepath.Base(absPath),
+		Name:       driver.Base(absPath),
 		Size:       lstat.Size(),
 		Mode:       lstat.Mode(),
 		Mtime:      lstat.ModTime(),
diff --git a/container/container.go b/container/container.go
index 188c017..3e8a370 100644
--- a/container/container.go
+++ b/container/container.go
@@ -15,6 +15,7 @@
 	"syscall"
 	"time"
 
+	"github.com/containerd/containerd"
 	containertypes "github.com/docker/docker/api/types/container"
 	mounttypes "github.com/docker/docker/api/types/mount"
 	networktypes "github.com/docker/docker/api/types/network"
@@ -28,6 +29,7 @@
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/signal"
@@ -50,24 +52,31 @@
 
 const configFileName = "config.v2.json"
 
-const (
-	// DefaultStopTimeout is the timeout (in seconds) for the syscall signal used to stop a container.
-	DefaultStopTimeout = 10
-)
-
 var (
 	errInvalidEndpoint = errors.New("invalid endpoint while building port map info")
 	errInvalidNetwork  = errors.New("invalid network settings while building port map info")
 )
 
+// ExitStatus provides exit reasons for a container.
+type ExitStatus struct {
+	// The exit code with which the container exited.
+	ExitCode int
+
+	// Whether the container encountered an OOM.
+	OOMKilled bool
+
+	// Time at which the container died
+	ExitedAt time.Time
+}
+
 // Container holds the structure defining a container object.
 type Container struct {
 	StreamConfig *stream.Config
 	// embed for Container to support states directly.
-	*State          `json:"State"` // Needed for Engine API version <= 1.11
-	Root            string         `json:"-"` // Path to the "home" of the container, including metadata.
-	BaseFS          string         `json:"-"` // Path to the graphdriver mountpoint
-	RWLayer         layer.RWLayer  `json:"-"`
+	*State          `json:"State"`          // Needed for Engine API version <= 1.11
+	Root            string                  `json:"-"` // Path to the "home" of the container, including metadata.
+	BaseFS          containerfs.ContainerFS `json:"-"` // interface containing graphdriver mount
+	RWLayer         layer.RWLayer           `json:"-"`
 	ID              string
 	Created         time.Time
 	Managed         bool
@@ -79,7 +88,7 @@
 	LogPath         string
 	Name            string
 	Driver          string
-	Platform        string
+	OS              string
 	// MountLabel contains the options for the 'mount' command
 	MountLabel             string
 	ProcessLabel           string
@@ -146,11 +155,11 @@
 		return err
 	}
 
-	// Ensure the platform is set if blank. Assume it is the platform of the
-	// host OS if not, to ensure containers created before multiple-platform
+	// Ensure the operating system is set if blank. Assume it is the OS of the
+	// host OS if not, to ensure containers created before multiple-OS
 	// support are migrated
-	if container.Platform == "" {
-		container.Platform = runtime.GOOS
+	if container.OS == "" {
+		container.OS = runtime.GOOS
 	}
 
 	return container.readHostConfig()
@@ -263,7 +272,7 @@
 func (container *Container) SetupWorkingDirectory(rootIDs idtools.IDPair) error {
 	// TODO @jhowardmsft, @gupta-ak LCOW Support. This will need revisiting.
 	// We will need to do remote filesystem operations here.
-	if container.Platform != runtime.GOOS {
+	if container.OS != runtime.GOOS {
 		return nil
 	}
 
@@ -305,15 +314,13 @@
 func (container *Container) GetResourcePath(path string) (string, error) {
 	// IMPORTANT - These are paths on the OS where the daemon is running, hence
 	// any filepath operations must be done in an OS agnostic way.
-
-	cleanPath := cleanResourcePath(path)
-	r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS)
+	r, e := container.BaseFS.ResolveScopedPath(path, false)
 
 	// Log this here on the daemon side as there's otherwise no indication apart
 	// from the error being propagated all the way back to the client. This makes
 	// debugging significantly easier and clearly indicates the error comes from the daemon.
 	if e != nil {
-		logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e)
+		logrus.Errorf("Failed to ResolveScopedPath BaseFS %s path %s %s\n", container.BaseFS.Path(), path, e)
 	}
 	return r, e
 }
@@ -435,6 +442,11 @@
 
 // AddMountPointWithVolume adds a new mount point configured with a volume to the container.
 func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) {
+	operatingSystem := container.OS
+	if operatingSystem == "" {
+		operatingSystem = runtime.GOOS
+	}
+	volumeParser := volume.NewParser(operatingSystem)
 	container.MountPoints[destination] = &volume.MountPoint{
 		Type:        mounttypes.TypeVolume,
 		Name:        vol.Name(),
@@ -442,7 +454,7 @@
 		Destination: destination,
 		RW:          rw,
 		Volume:      vol,
-		CopyData:    volume.DefaultCopyMode,
+		CopyData:    volumeParser.DefaultCopyMode(),
 	}
 }
 
@@ -992,10 +1004,10 @@
 }
 
 // InitializeStdio is called by libcontainerd to connect the stdio.
-func (container *Container) InitializeStdio(iop libcontainerd.IOPipe) error {
+func (container *Container) InitializeStdio(iop *libcontainerd.IOPipe) (containerd.IO, error) {
 	if err := container.startLogging(); err != nil {
 		container.Reset(false)
-		return err
+		return nil, err
 	}
 
 	container.StreamConfig.CopyToPipe(iop)
@@ -1008,7 +1020,7 @@
 		}
 	}
 
-	return nil
+	return &cio{IO: iop, sc: container.StreamConfig}, nil
 }
 
 // SecretMountPath returns the path of the secret mount for the container
@@ -1043,15 +1055,14 @@
 // CreateDaemonEnvironment creates a new environment variable slice for this container.
 func (container *Container) CreateDaemonEnvironment(tty bool, linkedEnv []string) []string {
 	// Setup environment
-	// TODO @jhowardmsft LCOW Support. This will need revisiting later.
-	platform := container.Platform
-	if platform == "" {
-		platform = runtime.GOOS
+	os := container.OS
+	if os == "" {
+		os = runtime.GOOS
 	}
 	env := []string{}
-	if runtime.GOOS != "windows" || (system.LCOWSupported() && platform == "linux") {
+	if runtime.GOOS != "windows" || (runtime.GOOS == "windows" && os == "linux") {
 		env = []string{
-			"PATH=" + system.DefaultPathEnv(platform),
+			"PATH=" + system.DefaultPathEnv(os),
 			"HOSTNAME=" + container.Config.Hostname,
 		}
 		if tty {
@@ -1066,3 +1077,21 @@
 	env = ReplaceOrAppendEnvValues(env, container.Config.Env)
 	return env
 }
+
+type cio struct {
+	containerd.IO
+
+	sc *stream.Config
+}
+
+func (i *cio) Close() error {
+	i.IO.Close()
+
+	return i.sc.CloseStreams()
+}
+
+func (i *cio) Wait() {
+	i.sc.Wait()
+
+	i.IO.Wait()
+}
diff --git a/container/container_notlinux.go b/container/container_notlinux.go
index 768c762..246a146 100644
--- a/container/container_notlinux.go
+++ b/container/container_notlinux.go
@@ -1,4 +1,4 @@
-// +build solaris freebsd
+// +build freebsd
 
 package container
 
@@ -7,7 +7,7 @@
 )
 
 func detachMounted(path string) error {
-	//Solaris and FreeBSD do not support the lazy unmount or MNT_DETACH feature.
+	// FreeBSD does not support the lazy unmount or MNT_DETACH feature.
 	// Therefore there are separate definitions for this.
 	return unix.Unmount(path, 0)
 }
diff --git a/container/container_unix.go b/container/container_unix.go
index 8212cb9..7785194 100644
--- a/container/container_unix.go
+++ b/container/container_unix.go
@@ -1,11 +1,10 @@
-// +build linux freebsd solaris
+// +build linux freebsd
 
 package container
 
 import (
 	"io/ioutil"
 	"os"
-	"path/filepath"
 
 	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
@@ -13,7 +12,6 @@
 	"github.com/docker/docker/pkg/chrootarchive"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/volume"
 	"github.com/opencontainers/selinux/go-selinux/label"
@@ -23,18 +21,12 @@
 )
 
 const (
+	// DefaultStopTimeout is the timeout (in seconds) for the syscall signal used to stop a container.
+	DefaultStopTimeout = 10
+
 	containerSecretMountPath = "/run/secrets"
 )
 
-// ExitStatus provides exit reasons for a container.
-type ExitStatus struct {
-	// The exit code with which the container exited.
-	ExitCode int
-
-	// Whether the container encountered an OOM.
-	OOMKilled bool
-}
-
 // TrySetNetworkMount attempts to set the network mounts given a provided destination and
 // the path to use for it; return true if the given destination was a network mount file
 func (container *Container) TrySetNetworkMount(destination string, path string) bool {
@@ -68,6 +60,7 @@
 func (container *Container) NetworkMounts() []Mount {
 	var mounts []Mount
 	shared := container.HostConfig.NetworkMode.IsContainer()
+	parser := volume.NewParser(container.OS)
 	if container.ResolvConfPath != "" {
 		if _, err := os.Stat(container.ResolvConfPath); err != nil {
 			logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err)
@@ -83,7 +76,7 @@
 				Source:      container.ResolvConfPath,
 				Destination: "/etc/resolv.conf",
 				Writable:    writable,
-				Propagation: string(volume.DefaultPropagationMode),
+				Propagation: string(parser.DefaultPropagationMode()),
 			})
 		}
 	}
@@ -102,7 +95,7 @@
 				Source:      container.HostnamePath,
 				Destination: "/etc/hostname",
 				Writable:    writable,
-				Propagation: string(volume.DefaultPropagationMode),
+				Propagation: string(parser.DefaultPropagationMode()),
 			})
 		}
 	}
@@ -121,7 +114,7 @@
 				Source:      container.HostsPath,
 				Destination: "/etc/hosts",
 				Writable:    writable,
-				Propagation: string(volume.DefaultPropagationMode),
+				Propagation: string(parser.DefaultPropagationMode()),
 			})
 		}
 	}
@@ -130,7 +123,7 @@
 
 // CopyImagePathContent copies files in destination to the volume.
 func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
-	rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS)
+	rootfs, err := container.GetResourcePath(destination)
 	if err != nil {
 		return err
 	}
@@ -196,6 +189,7 @@
 // IpcMounts returns the list of IPC mounts
 func (container *Container) IpcMounts() []Mount {
 	var mounts []Mount
+	parser := volume.NewParser(container.OS)
 
 	if container.HasMountFor("/dev/shm") {
 		return mounts
@@ -209,7 +203,7 @@
 		Source:      container.ShmPath,
 		Destination: "/dev/shm",
 		Writable:    true,
-		Propagation: string(volume.DefaultPropagationMode),
+		Propagation: string(parser.DefaultPropagationMode()),
 	})
 
 	return mounts
@@ -429,6 +423,7 @@
 
 // TmpfsMounts returns the list of tmpfs mounts
 func (container *Container) TmpfsMounts() ([]Mount, error) {
+	parser := volume.NewParser(container.OS)
 	var mounts []Mount
 	for dest, data := range container.HostConfig.Tmpfs {
 		mounts = append(mounts, Mount{
@@ -439,7 +434,7 @@
 	}
 	for dest, mnt := range container.MountPoints {
 		if mnt.Type == mounttypes.TypeTmpfs {
-			data, err := volume.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly)
+			data, err := parser.ConvertTmpfsOptions(mnt.Spec.TmpfsOptions, mnt.Spec.ReadOnly)
 			if err != nil {
 				return nil, err
 			}
@@ -453,11 +448,6 @@
 	return mounts, nil
 }
 
-// cleanResourcePath cleans a resource path and prepares to combine with mnt path
-func cleanResourcePath(path string) string {
-	return filepath.Join(string(os.PathSeparator), path)
-}
-
 // EnableServiceDiscoveryOnDefaultNetwork Enable service discovery on default network
 func (container *Container) EnableServiceDiscoveryOnDefaultNetwork() bool {
 	return false
diff --git a/container/container_windows.go b/container/container_windows.go
index 9c52d00..5cb2e45 100644
--- a/container/container_windows.go
+++ b/container/container_windows.go
@@ -16,13 +16,10 @@
 	containerSecretMountPath         = `C:\ProgramData\Docker\secrets`
 	containerInternalSecretMountPath = `C:\ProgramData\Docker\internal\secrets`
 	containerInternalConfigsDirPath  = `C:\ProgramData\Docker\internal\configs`
-)
 
-// ExitStatus provides exit reasons for a container.
-type ExitStatus struct {
-	// The exit code with which the container exited.
-	ExitCode int
-}
+	// DefaultStopTimeout is the timeout (in seconds) for the shutdown call on a container
+	DefaultStopTimeout = 30
+)
 
 // UnmountIpcMount unmounts Ipc related mounts.
 // This is a NOOP on windows.
@@ -172,18 +169,6 @@
 	return nil
 }
 
-// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares
-// to combine with a volume path
-func cleanResourcePath(path string) string {
-	if len(path) >= 2 {
-		c := path[0]
-		if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
-			path = path[2:]
-		}
-	}
-	return filepath.Join(string(os.PathSeparator), path)
-}
-
 // BuildHostnameFile writes the container's hostname file.
 func (container *Container) BuildHostnameFile() error {
 	return nil
diff --git a/container/state.go b/container/state.go
index 32f3f5b..3af4015 100644
--- a/container/state.go
+++ b/container/state.go
@@ -29,7 +29,7 @@
 	Dead              bool
 	Pid               int
 	ExitCodeValue     int    `json:"ExitCode"`
-	ErrorMsg          string `json:"Error"` // contains last known error when starting the container
+	ErrorMsg          string `json:"Error"` // contains last known error during container start or remove
 	StartedAt         time.Time
 	FinishedAt        time.Time
 	Health            *Health
@@ -276,6 +276,7 @@
 // SetRunning sets the state of the container to "running".
 func (s *State) SetRunning(pid int, initial bool) {
 	s.ErrorMsg = ""
+	s.Paused = false
 	s.Running = true
 	s.Restarting = false
 	if initial {
@@ -294,9 +295,14 @@
 	s.Paused = false
 	s.Restarting = false
 	s.Pid = 0
-	s.FinishedAt = time.Now().UTC()
-	s.setFromExitStatus(exitStatus)
-	close(s.waitStop) // Fire waiters for stop
+	if exitStatus.ExitedAt.IsZero() {
+		s.FinishedAt = time.Now().UTC()
+	} else {
+		s.FinishedAt = exitStatus.ExitedAt
+	}
+	s.ExitCodeValue = exitStatus.ExitCode
+	s.OOMKilled = exitStatus.OOMKilled
+	close(s.waitStop) // fire waiters for stop
 	s.waitStop = make(chan struct{})
 }
 
@@ -310,8 +316,9 @@
 	s.Paused = false
 	s.Pid = 0
 	s.FinishedAt = time.Now().UTC()
-	s.setFromExitStatus(exitStatus)
-	close(s.waitStop) // Fire waiters for stop
+	s.ExitCodeValue = exitStatus.ExitCode
+	s.OOMKilled = exitStatus.OOMKilled
+	close(s.waitStop) // fire waiters for stop
 	s.waitStop = make(chan struct{})
 }
 
@@ -319,7 +326,10 @@
 // know the error that occurred when container transits to another state
 // when inspecting it
 func (s *State) SetError(err error) {
-	s.ErrorMsg = err.Error()
+	s.ErrorMsg = ""
+	if err != nil {
+		s.ErrorMsg = err.Error()
+	}
 }
 
 // IsPaused returns whether the container is paused or not.
@@ -357,6 +367,15 @@
 	s.Unlock()
 }
 
+// IsRemovalInProgress returns whether the RemovalInProgress flag is set.
+// Used by Container to check whether a container is being removed.
+func (s *State) IsRemovalInProgress() bool {
+	s.Lock()
+	res := s.RemovalInProgress
+	s.Unlock()
+	return res
+}
+
 // SetDead sets the container state to "dead"
 func (s *State) SetDead() {
 	s.Lock()
@@ -364,12 +383,30 @@
 	s.Unlock()
 }
 
+// IsDead returns whether the Dead flag is set. Used by Container to check whether a container is dead.
+func (s *State) IsDead() bool {
+	s.Lock()
+	res := s.Dead
+	s.Unlock()
+	return res
+}
+
 // SetRemoved assumes this container is already in the "dead" state and
 // closes the internal waitRemove channel to unblock callers waiting for a
 // container to be removed.
 func (s *State) SetRemoved() {
+	s.SetRemovalError(nil)
+}
+
+// SetRemovalError is to be called in case a container remove failed.
+// It sets an error and closes the internal waitRemove channel to unblock
+// callers waiting for the container to be removed.
+func (s *State) SetRemovalError(err error) {
+	s.SetError(err)
 	s.Lock()
 	close(s.waitRemove) // Unblock those waiting on remove.
+	// Recreate the channel so next ContainerWait will work
+	s.waitRemove = make(chan struct{})
 	s.Unlock()
 }
 
diff --git a/container/state_test.go b/container/state_test.go
index 2a90e55..14f2460 100644
--- a/container/state_test.go
+++ b/container/state_test.go
@@ -166,3 +166,27 @@
 		}
 	}
 }
+
+func TestIsValidStateString(t *testing.T) {
+	states := []struct {
+		state    string
+		expected bool
+	}{
+		{"paused", true},
+		{"restarting", true},
+		{"running", true},
+		{"dead", true},
+		{"start", false},
+		{"created", true},
+		{"exited", true},
+		{"removing", true},
+		{"stop", false},
+	}
+
+	for _, s := range states {
+		v := IsValidStateString(s.state)
+		if v != s.expected {
+			t.Fatalf("Expected %t, but got %t", s.expected, v)
+		}
+	}
+}
diff --git a/container/state_unix.go b/container/state_unix.go
deleted file mode 100644
index a2fa5af..0000000
--- a/container/state_unix.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build linux freebsd
-
-package container
-
-// setFromExitStatus is a platform specific helper function to set the state
-// based on the ExitStatus structure.
-func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
-	s.ExitCodeValue = exitStatus.ExitCode
-	s.OOMKilled = exitStatus.OOMKilled
-}
diff --git a/container/state_windows.go b/container/state_windows.go
deleted file mode 100644
index 1229650..0000000
--- a/container/state_windows.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package container
-
-// setFromExitStatus is a platform specific helper function to set the state
-// based on the ExitStatus structure.
-func (s *State) setFromExitStatus(exitStatus *ExitStatus) {
-	s.ExitCodeValue = exitStatus.ExitCode
-}
diff --git a/container/stream/attach.go b/container/stream/attach.go
index 24b6886..4b4a454 100644
--- a/container/stream/attach.go
+++ b/container/stream/attach.go
@@ -7,7 +7,6 @@
 	"golang.org/x/net/context"
 
 	"github.com/docker/docker/pkg/pools"
-	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/term"
 	"github.com/sirupsen/logrus"
 )
@@ -58,7 +57,7 @@
 }
 
 // CopyStreams starts goroutines to copy data in and out to/from the container
-func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) chan error {
+func (c *Config) CopyStreams(ctx context.Context, cfg *AttachConfig) <-chan error {
 	var (
 		wg     sync.WaitGroup
 		errors = make(chan error, 3)
@@ -137,35 +136,42 @@
 	go attachStream("stdout", cfg.Stdout, cfg.CStdout)
 	go attachStream("stderr", cfg.Stderr, cfg.CStderr)
 
-	return promise.Go(func() error {
-		done := make(chan struct{})
-		go func() {
-			wg.Wait()
-			close(done)
+	errs := make(chan error, 1)
+
+	go func() {
+		defer close(errs)
+		errs <- func() error {
+			done := make(chan struct{})
+			go func() {
+				wg.Wait()
+				close(done)
+			}()
+			select {
+			case <-done:
+			case <-ctx.Done():
+				// close all pipes
+				if cfg.CStdin != nil {
+					cfg.CStdin.Close()
+				}
+				if cfg.CStdout != nil {
+					cfg.CStdout.Close()
+				}
+				if cfg.CStderr != nil {
+					cfg.CStderr.Close()
+				}
+				<-done
+			}
+			close(errors)
+			for err := range errors {
+				if err != nil {
+					return err
+				}
+			}
+			return nil
 		}()
-		select {
-		case <-done:
-		case <-ctx.Done():
-			// close all pipes
-			if cfg.CStdin != nil {
-				cfg.CStdin.Close()
-			}
-			if cfg.CStdout != nil {
-				cfg.CStdout.Close()
-			}
-			if cfg.CStderr != nil {
-				cfg.CStderr.Close()
-			}
-			<-done
-		}
-		close(errors)
-		for err := range errors {
-			if err != nil {
-				return err
-			}
-		}
-		return nil
-	})
+	}()
+
+	return errs
 }
 
 func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) {
diff --git a/container/stream/streams.go b/container/stream/streams.go
index 7e734d8..106e2b1 100644
--- a/container/stream/streams.go
+++ b/container/stream/streams.go
@@ -114,12 +114,12 @@
 }
 
 // CopyToPipe connects streamconfig with a libcontainerd.IOPipe
-func (c *Config) CopyToPipe(iop libcontainerd.IOPipe) {
+func (c *Config) CopyToPipe(iop *libcontainerd.IOPipe) {
 	copyFunc := func(w io.Writer, r io.ReadCloser) {
 		c.Add(1)
 		go func() {
 			if _, err := pools.Copy(w, r); err != nil {
-				logrus.Errorf("stream copy error: %+v", err)
+				logrus.Errorf("stream copy error: %v", err)
 			}
 			r.Close()
 			c.Done()
@@ -138,7 +138,7 @@
 			go func() {
 				pools.Copy(iop.Stdin, stdin)
 				if err := iop.Stdin.Close(); err != nil {
-					logrus.Warnf("failed to close stdin: %+v", err)
+					logrus.Warnf("failed to close stdin: %v", err)
 				}
 			}()
 		}
diff --git a/container/view.go b/container/view.go
index 8d88526..164827c 100644
--- a/container/view.go
+++ b/container/view.go
@@ -203,10 +203,7 @@
 // Once released, a name can be reserved again
 func (db *memDB) ReleaseName(name string) error {
 	return db.withTxn(func(txn *memdb.Txn) error {
-		if err := txn.Delete(memdbNamesTable, nameAssociation{name: name}); err != nil {
-			return err
-		}
-		return nil
+		return txn.Delete(memdbNamesTable, nameAssociation{name: name})
 	})
 }
 
diff --git a/contrib/builder/deb/aarch64/debian-jessie/Dockerfile b/contrib/builder/deb/aarch64/debian-jessie/Dockerfile
index 0e80da4..865d6aa 100644
--- a/contrib/builder/deb/aarch64/debian-jessie/Dockerfile
+++ b/contrib/builder/deb/aarch64/debian-jessie/Dockerfile
@@ -5,21 +5,13 @@
 FROM aarch64/debian:jessie
 
 RUN echo deb http://ftp.debian.org/debian jessie-backports main > /etc/apt/sources.list.d/backports.list
-RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100
-
-# Install Go
-# aarch64 doesn't have official go binaries, so use the version of go installed from
-# the image to build go from source.
-ENV GO_VERSION 1.8.3
-RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
-	&& cd /usr/src/go/src \
-	&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
-
-ENV PATH /usr/src/go/bin:$PATH
+ENV GO_VERSION 1.8.5
+RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-arm64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
 
 ENV AUTO_GOPATH 1
 
-ENV DOCKER_BUILDTAGS apparmor selinux seccomp
-ENV RUNC_BUILDTAGS apparmor selinux seccomp
+ENV DOCKER_BUILDTAGS apparmor seccomp selinux
+ENV RUNC_BUILDTAGS apparmor seccomp selinux
diff --git a/contrib/builder/deb/aarch64/debian-stretch/Dockerfile b/contrib/builder/deb/aarch64/debian-stretch/Dockerfile
index caee191..2b561be 100644
--- a/contrib/builder/deb/aarch64/debian-stretch/Dockerfile
+++ b/contrib/builder/deb/aarch64/debian-stretch/Dockerfile
@@ -4,19 +4,13 @@
 
 FROM aarch64/debian:stretch
 
-RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-dev libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-# Install Go
-# aarch64 doesn't have official go binaries, so use the version of go installed from
-# the image to build go from source.
-ENV GO_VERSION 1.8.3
-RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
-	&& cd /usr/src/go/src \
-	&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
-
-ENV PATH /usr/src/go/bin:$PATH
+ENV GO_VERSION 1.8.5
+RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-arm64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
 
 ENV AUTO_GOPATH 1
 
-ENV DOCKER_BUILDTAGS apparmor selinux seccomp
-ENV RUNC_BUILDTAGS apparmor selinux seccomp
+ENV DOCKER_BUILDTAGS apparmor seccomp selinux
+ENV RUNC_BUILDTAGS apparmor seccomp selinux
diff --git a/contrib/builder/deb/aarch64/generate.sh b/contrib/builder/deb/aarch64/generate.sh
index 7c9217d..bc0b699 100755
--- a/contrib/builder/deb/aarch64/generate.sh
+++ b/contrib/builder/deb/aarch64/generate.sh
@@ -38,7 +38,7 @@
 
 	EOF
 
-	dockerBuildTags='apparmor selinux'
+	extraBuildTags='apparmor selinux'
 	runcBuildTags='apparmor selinux'
 
 	# this list is sorted alphabetically; please keep it that way
@@ -62,29 +62,20 @@
 	case "$suite" in
 		trusty)
 			packages+=( libsystemd-journal-dev )
-			# aarch64 doesn't have an official downloadable binary for go.
-			# And gccgo for trusty only includes Go 1.2 implementation which
-			# is too old to build current go source, fortunately trusty has
-			# golang-1.6-go package can be used as bootstrap.
-			packages+=( golang-1.6-go )
 			;;
 		jessie)
 			packages+=( libsystemd-journal-dev )
-			# aarch64 doesn't have an official downloadable binary for go.
-			# And gccgo for jessie only includes Go 1.2 implementation which
-			# is too old to build current go source, fortunately jessie backports
-			# has golang-1.6-go package can be used as bootstrap.
-			packages+=( golang-1.6-go libseccomp-dev )
+			packages+=( libseccomp-dev )
 
-			dockerBuildTags="$dockerBuildTags seccomp"
-			runcBuildTags="$runcBuildTags seccomp"
+			extraBuildTags+=' seccomp'
+			runcBuildTags+=' seccomp'
 			;;
 		stretch|xenial)
 			packages+=( libsystemd-dev )
-			packages+=( golang-go libseccomp-dev )
+			packages+=( libseccomp-dev )
 
-			dockerBuildTags="$dockerBuildTags seccomp"
-			runcBuildTags="$runcBuildTags seccomp"
+			extraBuildTags+=' seccomp'
+			runcBuildTags+=' seccomp'
 			;;
 		*)
 			echo "Unsupported distro:" $distro:$suite
@@ -105,31 +96,17 @@
 	echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile"
 	echo >> "$version/Dockerfile"
 
-	case "$suite" in
-		jessie|trusty)
-			echo 'RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100' >> "$version/Dockerfile"
-			echo >> "$version/Dockerfile"
-			;;
-		*)
-			;;
-	esac
-
-	echo "# Install Go" >> "$version/Dockerfile"
-	echo "# aarch64 doesn't have official go binaries, so use the version of go installed from" >> "$version/Dockerfile"
-	echo "# the image to build go from source." >> "$version/Dockerfile"
-
 	awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile.aarch64 >> "$version/Dockerfile"
-	echo 'RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \' >> "$version/Dockerfile"
-	echo '	&& cd /usr/src/go/src \' >> "$version/Dockerfile"
-	echo '	&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash' >> "$version/Dockerfile"
+	echo 'RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-arm64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile"
+	echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile"
 	echo >> "$version/Dockerfile"
 
-	echo 'ENV PATH /usr/src/go/bin:$PATH' >> "$version/Dockerfile"
+	echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile"
 	echo >> "$version/Dockerfile"
 
-	echo "ENV AUTO_GOPATH 1" >> "$version/Dockerfile"
-	echo >> "$version/Dockerfile"
-
-	echo "ENV DOCKER_BUILDTAGS $dockerBuildTags" >> "$version/Dockerfile"
+	# print build tags in alphabetical order
+	buildTags=$( echo "$extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' )
+	runcBuildTags=$( echo "$runcBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' )
+	echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile"
 	echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile"
 done
diff --git a/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile b/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile
index 6f4a3e9..e1b85ec 100644
--- a/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile
+++ b/contrib/builder/deb/aarch64/ubuntu-trusty/Dockerfile
@@ -4,19 +4,11 @@
 
 FROM aarch64/ubuntu:trusty
 
-RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev golang-1.6-go --no-install-recommends && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-RUN update-alternatives --install /usr/bin/go go /usr/lib/go-1.6/bin/go 100
-
-# Install Go
-# aarch64 doesn't have official go binaries, so use the version of go installed from
-# the image to build go from source.
-ENV GO_VERSION 1.8.3
-RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
-	&& cd /usr/src/go/src \
-	&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
-
-ENV PATH /usr/src/go/bin:$PATH
+ENV GO_VERSION 1.8.5
+RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-arm64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
 
 ENV AUTO_GOPATH 1
 
diff --git a/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile b/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile
index 19a510b..6f8bc95 100644
--- a/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile
+++ b/contrib/builder/deb/aarch64/ubuntu-xenial/Dockerfile
@@ -4,19 +4,13 @@
 
 FROM aarch64/ubuntu:xenial
 
-RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-dev golang-go libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
+RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-dev libseccomp-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-# Install Go
-# aarch64 doesn't have official go binaries, so use the version of go installed from
-# the image to build go from source.
-ENV GO_VERSION 1.8.3
-RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
-	&& cd /usr/src/go/src \
-	&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
-
-ENV PATH /usr/src/go/bin:$PATH
+ENV GO_VERSION 1.8.5
+RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-arm64.tar.gz" | tar xzC /usr/local
+ENV PATH $PATH:/usr/local/go/bin
 
 ENV AUTO_GOPATH 1
 
-ENV DOCKER_BUILDTAGS apparmor selinux seccomp
-ENV RUNC_BUILDTAGS apparmor selinux seccomp
+ENV DOCKER_BUILDTAGS apparmor seccomp selinux
+ENV RUNC_BUILDTAGS apparmor seccomp selinux
diff --git a/contrib/builder/deb/amd64/debian-jessie/Dockerfile b/contrib/builder/deb/amd64/debian-jessie/Dockerfile
index 9b6233c..668fc3c 100644
--- a/contrib/builder/deb/amd64/debian-jessie/Dockerfile
+++ b/contrib/builder/deb/amd64/debian-jessie/Dockerfile
@@ -10,7 +10,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev  pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/debian-stretch/Dockerfile b/contrib/builder/deb/amd64/debian-stretch/Dockerfile
index d95c194..a23eba3 100644
--- a/contrib/builder/deb/amd64/debian-stretch/Dockerfile
+++ b/contrib/builder/deb/amd64/debian-stretch/Dockerfile
@@ -10,7 +10,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/debian-wheezy/Dockerfile b/contrib/builder/deb/amd64/debian-wheezy/Dockerfile
index 56763a5..2652706 100644
--- a/contrib/builder/deb/amd64/debian-wheezy/Dockerfile
+++ b/contrib/builder/deb/amd64/debian-wheezy/Dockerfile
@@ -12,7 +12,7 @@
 RUN apt-get update && apt-get install -y -t wheezy-backports btrfs-tools --no-install-recommends && rm -rf /var/lib/apt/lists/*
 RUN apt-get update && apt-get install -y apparmor bash-completion  build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev  pkg-config vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile b/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile
index f620116..4fce6f3 100644
--- a/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile
+++ b/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev  pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile b/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile
index 6b9370d..ed9c4a9 100644
--- a/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile
+++ b/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile b/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile
index 232c86f..a7dd9b7 100644
--- a/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile
+++ b/contrib/builder/deb/amd64/ubuntu-yakkety/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile b/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile
index 0b4e45b..5074efe 100644
--- a/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile
+++ b/contrib/builder/deb/amd64/ubuntu-zesty/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/armhf/debian-jessie/Dockerfile b/contrib/builder/deb/armhf/debian-jessie/Dockerfile
index 9a50cd1..558d353 100644
--- a/contrib/builder/deb/armhf/debian-jessie/Dockerfile
+++ b/contrib/builder/deb/armhf/debian-jessie/Dockerfile
@@ -10,7 +10,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev  pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile b/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile
index 810cb2a..31a6688 100644
--- a/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile
+++ b/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile
@@ -10,7 +10,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev  pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 # GOARM is the ARM architecture version which is unrelated to the above Golang version
 ENV GOARM 6
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
diff --git a/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile b/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile
index 530f9fc..a9899a0 100644
--- a/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile
+++ b/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev  pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile b/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile
index a7f79ee..2766f33 100644
--- a/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile
+++ b/contrib/builder/deb/armhf/ubuntu-xenial/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile b/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile
index 69f1bed..27edd04 100644
--- a/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile
+++ b/contrib/builder/deb/armhf/ubuntu-yakkety/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config vim-common libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile b/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile
index 0841a99..b85a68e 100644
--- a/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile
+++ b/contrib/builder/deb/ppc64le/ubuntu-trusty/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile b/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile
index f9dd250..abb5b23 100644
--- a/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile
+++ b/contrib/builder/deb/ppc64le/ubuntu-xenial/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile b/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile
index 539969d..d725816 100644
--- a/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile
+++ b/contrib/builder/deb/ppc64le/ubuntu-yakkety/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev pkg-config vim-common libseccomp-dev libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile b/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile
index 2ed0ad3..6d61ed7 100644
--- a/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile
+++ b/contrib/builder/deb/s390x/ubuntu-xenial/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile b/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile
index af79ff5..e30e875 100644
--- a/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile
+++ b/contrib/builder/deb/s390x/ubuntu-yakkety/Dockerfile
@@ -6,7 +6,7 @@
 
 RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential cmake curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libseccomp-dev pkg-config libsystemd-dev vim-common --no-install-recommends && rm -rf /var/lib/apt/lists/*
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile b/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile
index 7632daf..8e755cd 100644
--- a/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile
+++ b/contrib/builder/rpm/amd64/amazonlinux-latest/Dockerfile
@@ -7,7 +7,7 @@
 RUN yum groupinstall -y "Development Tools"
 RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel  tar git cmake vim-common
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/centos-7/Dockerfile b/contrib/builder/rpm/amd64/centos-7/Dockerfile
index 82cb915..8a22064 100644
--- a/contrib/builder/rpm/amd64/centos-7/Dockerfile
+++ b/contrib/builder/rpm/amd64/centos-7/Dockerfile
@@ -8,7 +8,7 @@
 RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs
 RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/fedora-24/Dockerfile b/contrib/builder/rpm/amd64/fedora-24/Dockerfile
index cdb922c..e0b369a 100644
--- a/contrib/builder/rpm/amd64/fedora-24/Dockerfile
+++ b/contrib/builder/rpm/amd64/fedora-24/Dockerfile
@@ -8,7 +8,7 @@
 RUN dnf install -y @development-tools fedora-packager
 RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/fedora-25/Dockerfile b/contrib/builder/rpm/amd64/fedora-25/Dockerfile
index 73e9e9a..f259a5c 100644
--- a/contrib/builder/rpm/amd64/fedora-25/Dockerfile
+++ b/contrib/builder/rpm/amd64/fedora-25/Dockerfile
@@ -8,7 +8,7 @@
 RUN dnf install -y @development-tools fedora-packager
 RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile b/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile
index af97fdc..7f13863 100644
--- a/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile
+++ b/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile
@@ -7,7 +7,7 @@
 RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build
 RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static  libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel systemd-devel tar git cmake vim systemd-rpm-macros
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile b/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile
index 8e15365..b75f2dc 100644
--- a/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile
+++ b/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile
@@ -10,7 +10,7 @@
 RUN yum groupinstall -y "Development Tools"
 RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static  libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel  tar git cmake vim-common
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile b/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile
index 7e5a832..f4dc894 100644
--- a/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile
+++ b/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile
@@ -7,7 +7,7 @@
 RUN yum groupinstall -y "Development Tools"
 RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/amd64/photon-1.0/Dockerfile b/contrib/builder/rpm/amd64/photon-1.0/Dockerfile
index 6b489d7..01d5fea 100644
--- a/contrib/builder/rpm/amd64/photon-1.0/Dockerfile
+++ b/contrib/builder/rpm/amd64/photon-1.0/Dockerfile
@@ -7,7 +7,7 @@
 RUN tdnf install -y wget curl ca-certificates gzip make rpm-build sed gcc linux-api-headers glibc-devel binutils libseccomp elfutils
 RUN tdnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/armhf/centos-7/Dockerfile b/contrib/builder/rpm/armhf/centos-7/Dockerfile
index f482fa7..79c2ef1 100644
--- a/contrib/builder/rpm/armhf/centos-7/Dockerfile
+++ b/contrib/builder/rpm/armhf/centos-7/Dockerfile
@@ -9,7 +9,7 @@
 RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs
 RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/ppc64le/centos-7/Dockerfile b/contrib/builder/rpm/ppc64le/centos-7/Dockerfile
index 95da278..ebce3c0 100644
--- a/contrib/builder/rpm/ppc64le/centos-7/Dockerfile
+++ b/contrib/builder/rpm/ppc64le/centos-7/Dockerfile
@@ -8,7 +8,7 @@
 RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs
 RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile b/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile
index 70a9f5d..18dd7d4 100644
--- a/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile
+++ b/contrib/builder/rpm/ppc64le/fedora-24/Dockerfile
@@ -8,7 +8,7 @@
 RUN dnf install -y @development-tools fedora-packager
 RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel systemd-devel tar git cmake
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile b/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile
index d11caa4..3343f02 100644
--- a/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile
+++ b/contrib/builder/rpm/ppc64le/opensuse-42.1/Dockerfile
@@ -9,7 +9,7 @@
 RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build
 RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static  libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fSL "https://golang.org/dl/go${GO_VERSION}.linux-ppc64le.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile b/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile
index 1310b21..ef875b8 100644
--- a/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile
+++ b/contrib/builder/rpm/s390x/clefos-base-s390x-7/Dockerfile
@@ -8,7 +8,7 @@
 RUN touch /var/lib/rpm/* && yum groupinstall -y "Development Tools"
 RUN touch /var/lib/rpm/* && yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim-common
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile b/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile
index 05cad42..a05bee5 100644
--- a/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile
+++ b/contrib/builder/rpm/s390x/opensuse-tumbleweed-1/Dockerfile
@@ -9,7 +9,7 @@
 RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build
 RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static  libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git cmake vim systemd-rpm-macros
 
-ENV GO_VERSION 1.8.3
+ENV GO_VERSION 1.8.5
 RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" | tar xzC /usr/local
 ENV PATH $PATH:/usr/local/go/bin
 
diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go
index 8f62515..d3ec46a 100644
--- a/contrib/docker-device-tool/device_tool.go
+++ b/contrib/docker-device-tool/device_tool.go
@@ -1,4 +1,4 @@
-// +build !windows,!solaris
+// +build !windows
 
 package main
 
@@ -90,14 +90,12 @@
 		fmt.Printf("Sector size: %d\n", status.SectorSize)
 		fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total))
 		fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total))
-		break
 	case "list":
 		ids := devices.List()
 		sort.Strings(ids)
 		for _, id := range ids {
 			fmt.Println(id)
 		}
-		break
 	case "device":
 		if flag.NArg() < 2 {
 			usage()
@@ -113,7 +111,6 @@
 		fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors)
 		fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors)
 		fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector)
-		break
 	case "resize":
 		if flag.NArg() < 2 {
 			usage()
@@ -131,7 +128,6 @@
 			os.Exit(1)
 		}
 
-		break
 	case "snap":
 		if flag.NArg() < 3 {
 			usage()
@@ -142,7 +138,6 @@
 			fmt.Println("Can't create snap device: ", err)
 			os.Exit(1)
 		}
-		break
 	case "remove":
 		if flag.NArg() < 2 {
 			usage()
@@ -153,7 +148,6 @@
 			fmt.Println("Can't remove device: ", err)
 			os.Exit(1)
 		}
-		break
 	case "mount":
 		if flag.NArg() < 3 {
 			usage()
@@ -164,13 +158,10 @@
 			fmt.Println("Can't mount device: ", err)
 			os.Exit(1)
 		}
-		break
 	default:
 		fmt.Printf("Unknown command %s\n", args[0])
 		usage()
 
 		os.Exit(1)
 	}
-
-	return
 }
diff --git a/contrib/httpserver/Dockerfile.solaris b/contrib/httpserver/Dockerfile.solaris
deleted file mode 100644
index 3d0d691..0000000
--- a/contrib/httpserver/Dockerfile.solaris
+++ /dev/null
@@ -1,4 +0,0 @@
-FROM solaris
-EXPOSE 80/tcp
-COPY httpserver .
-CMD ["./httpserver"]
diff --git a/contrib/mkimage-alpine.sh b/contrib/mkimage-alpine.sh
index a271eff..03180e4 100755
--- a/contrib/mkimage-alpine.sh
+++ b/contrib/mkimage-alpine.sh
@@ -47,7 +47,7 @@
 }
 
 save() {
-	[ $SAVE -eq 1 ] || return
+	[ $SAVE -eq 1 ] || return 0
 
 	tar --numeric-owner -C $ROOTFS -c . | xz > rootfs.tar.xz
 }
diff --git a/contrib/mkimage.sh b/contrib/mkimage.sh
index 13298c8..ae05d13 100755
--- a/contrib/mkimage.sh
+++ b/contrib/mkimage.sh
@@ -11,7 +11,6 @@
 	echo >&2 "       $mkimg -t someuser/centos:5 rinse --distribution centos-5"
 	echo >&2 "       $mkimg -t someuser/mageia:4 mageia-urpmi --version=4"
 	echo >&2 "       $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/"
-	echo >&2 "       $mkimg -t someuser/solaris solaris" 
 	exit 1
 }
 
@@ -20,13 +19,6 @@
 os=
 os=$(uname -o)
 
-# set up path to gnu tools if solaris
-[[ $os == "Solaris" ]] && export PATH=/usr/gnu/bin:$PATH 
-# TODO check for gnu-tar, gnu-getopt
-
-# TODO requires root/sudo due to some pkg operations. sigh.
-[[ $os == "Solaris" && $EUID != "0" ]] && echo >&2 "image create on Solaris requires superuser privilege"
-
 optTemp=$(getopt --options '+d:t:c:hC' --longoptions 'dir:,tag:,compression:,no-compression,help' --name "$mkimg" -- "$@")
 eval set -- "$optTemp"
 unset optTemp
diff --git a/contrib/mkimage/solaris b/contrib/mkimage/solaris
deleted file mode 100755
index 158970e..0000000
--- a/contrib/mkimage/solaris
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env bash
-#
-# Solaris 12 base image build script. 
-#
-set -e
-
-# TODO add optional package publisher origin
-
-rootfsDir="$1"
-shift
-
-# base install
-(
-	set -x
-
-	pkg image-create --full --zone \
-		--facet facet.locale.*=false \
-		--facet facet.locale.POSIX=true \
-		--facet facet.doc=false \
-		--facet facet.doc.*=false \
-		"$rootfsDir"
-
-	pkg -R "$rootfsDir" set-property use-system-repo true
-
-	pkg -R "$rootfsDir" set-property flush-content-cache-on-success true
-
-	pkg -R "$rootfsDir" install core-os
-)
-
-# Lay in stock configuration, set up milestone
-# XXX This all may become optional in a base image
-(
-	# faster to build repository database on tmpfs
-	REPO_DB=/system/volatile/repository.$$
-	export SVCCFG_REPOSITORY=${REPO_DB}
-	export SVCCFG_DOOR_PATH=$rootfsDir/system/volatile/tmp_repo_door
-
-	# Import base manifests. NOTE These are a combination of basic requirement
-	# and gleaned from container milestone manifest. They may change.
-	for m in $rootfsDir/lib/svc/manifest/system/environment.xml \
-		$rootfsDir/lib/svc/manifest/system/svc/global.xml \
-		$rootfsDir/lib/svc/manifest/system/svc/restarter.xml \
-		$rootfsDir/lib/svc/manifest/network/dns/client.xml \
-		$rootfsDir/lib/svc/manifest/system/name-service/switch.xml \
-		$rootfsDir/lib/svc/manifest/system/name-service/cache.xml \
-		$rootfsDir/lib/svc/manifest/milestone/container.xml ; do
-		svccfg import $m
-	done
-
-	# Apply system layer profile, deleting unnecessary dependencies
-	svccfg apply $rootfsDir/etc/svc/profile/generic_container.xml 
-
-	# XXX Even if we keep a repo in the base image, this is definitely optional
-	svccfg apply $rootfsDir/etc/svc/profile/sysconfig/container_sc.xml
-
-	for s in svc:/system/svc/restarter \
-		svc:/system/environment \
-		svc:/network/dns/client \
-		svc:/system/name-service/switch \
-		svc:/system/name-service/cache \
-		svc:/system/svc/global \
-		svc:/milestone/container ;do
-		svccfg -s $s refresh
-	done
-
-	# now copy the built up repository into the base rootfs
-	mv $REPO_DB $rootfsDir/etc/svc/repository.db
-)
-
-# pkg(1) needs the zoneproxy-client running in the container.
-# use a simple wrapper to run it as needed.
-# XXX maybe we go back to running this in SMF?
-mv "$rootfsDir/usr/bin/pkg" "$rootfsDir/usr/bin/wrapped_pkg"
-cat > "$rootfsDir/usr/bin/pkg" <<-'EOF'
-#!/bin/sh
-#
-# THIS FILE CREATED DURING DOCKER BASE IMAGE CREATION
-# 
-# The Solaris base image uses the sysrepo proxy mechanism. The
-# IPS client pkg(1) requires the zoneproxy-client to reach the
-# remote publisher origins through the host. This wrapper script
-# enables and disables the proxy client as needed. This is a
-# temporary solution.
-
-/usr/lib/zones/zoneproxy-client -s localhost:1008
-PKG_SYSREPO_URL=http://localhost:1008 /usr/bin/wrapped_pkg "$@"
-pkill -9 zoneproxy-client
-EOF
-chmod +x "$rootfsDir/usr/bin/pkg"
diff --git a/contrib/syntax/vim/ftdetect/dockerfile.vim b/contrib/syntax/vim/ftdetect/dockerfile.vim
index ee10e5d..f7a962e 100644
--- a/contrib/syntax/vim/ftdetect/dockerfile.vim
+++ b/contrib/syntax/vim/ftdetect/dockerfile.vim
@@ -1 +1 @@
-au BufNewFile,BufRead [Dd]ockerfile,Dockerfile.* set filetype=dockerfile
+au BufNewFile,BufRead [Dd]ockerfile,Dockerfile.*,*.Dockerfile set filetype=dockerfile
diff --git a/daemon/archive.go b/daemon/archive.go
index 4bcf8d0..c52d3b8 100644
--- a/daemon/archive.go
+++ b/daemon/archive.go
@@ -3,7 +3,6 @@
 import (
 	"io"
 	"os"
-	"path/filepath"
 	"strings"
 
 	"github.com/docker/docker/api/types"
@@ -20,6 +19,31 @@
 // path does not refer to a directory.
 var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory")
 
+// The daemon will use the following interfaces if the container fs implements
+// these for optimized copies to and from the container.
+type extractor interface {
+	ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error
+}
+
+type archiver interface {
+	ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error)
+}
+
+// helper functions to extract or archive
+func extractArchive(i interface{}, src io.Reader, dst string, opts *archive.TarOptions) error {
+	if ea, ok := i.(extractor); ok {
+		return ea.ExtractArchive(src, dst, opts)
+	}
+	return chrootarchive.Untar(src, dst, opts)
+}
+
+func archivePath(i interface{}, src string, opts *archive.TarOptions) (io.ReadCloser, error) {
+	if ap, ok := i.(archiver); ok {
+		return ap.ArchivePath(src, opts)
+	}
+	return archive.TarWithOptions(src, opts)
+}
+
 // ContainerCopy performs a deprecated operation of archiving the resource at
 // the specified path in the container identified by the given name.
 func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
@@ -138,6 +162,9 @@
 		return nil, err
 	}
 
+	// Normalize path before sending to rootfs
+	path = container.BaseFS.FromSlash(path)
+
 	resolvedPath, absPath, err := container.ResolvePath(path)
 	if err != nil {
 		return nil, err
@@ -178,6 +205,9 @@
 		return nil, nil, err
 	}
 
+	// Normalize path before sending to rootfs
+	path = container.BaseFS.FromSlash(path)
+
 	resolvedPath, absPath, err := container.ResolvePath(path)
 	if err != nil {
 		return nil, nil, err
@@ -196,7 +226,18 @@
 	// also catches the case when the root directory of the container is
 	// requested: we want the archive entries to start with "/" and not the
 	// container ID.
-	data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath))
+	driver := container.BaseFS
+
+	// Get the source and the base paths of the container resolved path in order
+	// to get the proper tar options for the rebase tar.
+	resolvedPath = driver.Clean(resolvedPath)
+	if driver.Base(resolvedPath) == "." {
+		resolvedPath += string(driver.Separator()) + "."
+	}
+	sourceDir, sourceBase := driver.Dir(resolvedPath), driver.Base(resolvedPath)
+	opts := archive.TarResourceRebaseOpts(sourceBase, driver.Base(absPath))
+
+	data, err := archivePath(driver, sourceDir, opts)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -235,8 +276,12 @@
 		return err
 	}
 
+	// Normalize path before sending to rootfs
+	path = container.BaseFS.FromSlash(path)
+	driver := container.BaseFS
+
 	// Check if a drive letter supplied, it must be the system drive. No-op except on Windows
-	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
+	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path, driver)
 	if err != nil {
 		return err
 	}
@@ -248,7 +293,10 @@
 	// that you can extract an archive to a symlink that points to a directory.
 
 	// Consider the given path as an absolute path in the container.
-	absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
+	absPath := archive.PreserveTrailingDotOrSeparator(
+		driver.Join(string(driver.Separator()), path),
+		path,
+		driver.Separator())
 
 	// This will evaluate the last path element if it is a symlink.
 	resolvedPath, err := container.GetResourcePath(absPath)
@@ -256,7 +304,7 @@
 		return err
 	}
 
-	stat, err := os.Lstat(resolvedPath)
+	stat, err := driver.Lstat(resolvedPath)
 	if err != nil {
 		return err
 	}
@@ -279,21 +327,24 @@
 	// a volume file path.
 	var baseRel string
 	if strings.HasPrefix(resolvedPath, `\\?\Volume{`) {
-		if strings.HasPrefix(resolvedPath, container.BaseFS) {
-			baseRel = resolvedPath[len(container.BaseFS):]
+		if strings.HasPrefix(resolvedPath, driver.Path()) {
+			baseRel = resolvedPath[len(driver.Path()):]
 			if baseRel[:1] == `\` {
 				baseRel = baseRel[1:]
 			}
 		}
 	} else {
-		baseRel, err = filepath.Rel(container.BaseFS, resolvedPath)
+		baseRel, err = driver.Rel(driver.Path(), resolvedPath)
 	}
 	if err != nil {
 		return err
 	}
 	// Make it an absolute path.
-	absPath = filepath.Join(string(filepath.Separator), baseRel)
+	absPath = driver.Join(string(driver.Separator()), baseRel)
 
+	// @ TODO: gupta-ak: Technically, this works since it no-ops
+	// on Windows and the file system is local anyway on linux.
+	// But eventually, it should be made driver aware.
 	toVolume, err := checkIfPathIsInAVolume(container, absPath)
 	if err != nil {
 		return err
@@ -315,7 +366,7 @@
 		}
 	}
 
-	if err := chrootarchive.Untar(content, resolvedPath, options); err != nil {
+	if err := extractArchive(driver, content, resolvedPath, options); err != nil {
 		return err
 	}
 
@@ -356,24 +407,28 @@
 		return nil, err
 	}
 
+	// Normalize path before sending to rootfs
+	resource = container.BaseFS.FromSlash(resource)
+	driver := container.BaseFS
+
 	basePath, err := container.GetResourcePath(resource)
 	if err != nil {
 		return nil, err
 	}
-	stat, err := os.Stat(basePath)
+	stat, err := driver.Stat(basePath)
 	if err != nil {
 		return nil, err
 	}
 	var filter []string
 	if !stat.IsDir() {
-		d, f := filepath.Split(basePath)
+		d, f := driver.Split(basePath)
 		basePath = d
 		filter = []string{f}
 	} else {
-		filter = []string{filepath.Base(basePath)}
-		basePath = filepath.Dir(basePath)
+		filter = []string{driver.Base(basePath)}
+		basePath = driver.Dir(basePath)
 	}
-	archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{
+	archive, err := archivePath(driver, basePath, &archive.TarOptions{
 		Compression:  archive.Uncompressed,
 		IncludeFiles: filter,
 	})
diff --git a/daemon/archive_unix.go b/daemon/archive_unix.go
index d5dfad7..ae12cae 100644
--- a/daemon/archive_unix.go
+++ b/daemon/archive_unix.go
@@ -4,6 +4,7 @@
 
 import (
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/volume"
 )
 
 // checkIfPathIsInAVolume checks if the path is in a volume. If it is, it
@@ -11,8 +12,9 @@
 // cannot be configured with a read-only rootfs.
 func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) {
 	var toVolume bool
+	parser := volume.NewParser(container.OS)
 	for _, mnt := range container.MountPoints {
-		if toVolume = mnt.HasResource(absPath); toVolume {
+		if toVolume = parser.HasResource(mnt, absPath); toVolume {
 			if mnt.RW {
 				break
 			}
diff --git a/daemon/attach.go b/daemon/attach.go
index 7b676cc..651a964 100644
--- a/daemon/attach.go
+++ b/daemon/attach.go
@@ -168,7 +168,7 @@
 		// Wait for the container to stop before returning.
 		waitChan := c.Wait(context.Background(), container.WaitConditionNotRunning)
 		defer func() {
-			_ = <-waitChan // Ignore returned exit code.
+			<-waitChan // Ignore returned exit code.
 		}()
 	}
 
diff --git a/daemon/bindmount_solaris.go b/daemon/bindmount_solaris.go
deleted file mode 100644
index 87bf3ef..0000000
--- a/daemon/bindmount_solaris.go
+++ /dev/null
@@ -1,5 +0,0 @@
-// +build solaris
-
-package daemon
-
-const bindMountType = "lofs"
diff --git a/daemon/build.go b/daemon/build.go
index 39269ab..6a00814 100644
--- a/daemon/build.go
+++ b/daemon/build.go
@@ -10,6 +10,7 @@
 	"github.com/docker/docker/builder"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/layer"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/registry"
@@ -25,9 +26,9 @@
 	rwLayer    layer.RWLayer
 }
 
-func (rl *releaseableLayer) Mount() (string, error) {
+func (rl *releaseableLayer) Mount() (containerfs.ContainerFS, error) {
 	var err error
-	var mountPath string
+	var mountPath containerfs.ContainerFS
 	var chainID layer.ChainID
 	if rl.roLayer != nil {
 		chainID = rl.roLayer.ChainID()
@@ -36,7 +37,7 @@
 	mountID := stringid.GenerateRandomID()
 	rl.rwLayer, err = rl.layerStore.CreateRWLayer(mountID, chainID, nil)
 	if err != nil {
-		return "", errors.Wrap(err, "failed to create rwlayer")
+		return nil, errors.Wrap(err, "failed to create rwlayer")
 	}
 
 	mountPath, err = rl.rwLayer.Mount("")
@@ -48,13 +49,13 @@
 			logrus.Errorf("Failed to release RWLayer: %s", err)
 		}
 		rl.rwLayer = nil
-		return "", err
+		return nil, err
 	}
 
 	return mountPath, nil
 }
 
-func (rl *releaseableLayer) Commit(platform string) (builder.ReleaseableLayer, error) {
+func (rl *releaseableLayer) Commit(os string) (builder.ReleaseableLayer, error) {
 	var chainID layer.ChainID
 	if rl.roLayer != nil {
 		chainID = rl.roLayer.ChainID()
@@ -66,7 +67,7 @@
 	}
 	defer stream.Close()
 
-	newLayer, err := rl.layerStore.Register(stream, chainID, layer.Platform(platform))
+	newLayer, err := rl.layerStore.Register(stream, chainID, layer.OS(os))
 	if err != nil {
 		return nil, err
 	}
@@ -175,7 +176,7 @@
 // leaking of layers.
 func (daemon *Daemon) GetImageAndReleasableLayer(ctx context.Context, refOrID string, opts backend.GetImageAndLayerOptions) (builder.Image, builder.ReleaseableLayer, error) {
 	if refOrID == "" {
-		layer, err := newReleasableLayerForImage(nil, daemon.stores[opts.Platform].layerStore)
+		layer, err := newReleasableLayerForImage(nil, daemon.stores[opts.OS].layerStore)
 		return nil, layer, err
 	}
 
@@ -186,16 +187,16 @@
 		}
 		// TODO: shouldn't we error out if error is different from "not found" ?
 		if image != nil {
-			layer, err := newReleasableLayerForImage(image, daemon.stores[opts.Platform].layerStore)
+			layer, err := newReleasableLayerForImage(image, daemon.stores[opts.OS].layerStore)
 			return image, layer, err
 		}
 	}
 
-	image, err := daemon.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.Platform)
+	image, err := daemon.pullForBuilder(ctx, refOrID, opts.AuthConfig, opts.Output, opts.OS)
 	if err != nil {
 		return nil, nil, err
 	}
-	layer, err := newReleasableLayerForImage(image, daemon.stores[opts.Platform].layerStore)
+	layer, err := newReleasableLayerForImage(image, daemon.stores[opts.OS].layerStore)
 	return image, layer, err
 }
 
diff --git a/daemon/checkpoint.go b/daemon/checkpoint.go
index d3028f1..5765af7 100644
--- a/daemon/checkpoint.go
+++ b/daemon/checkpoint.go
@@ -1,23 +1,24 @@
 package daemon
 
 import (
+	"context"
 	"encoding/json"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 
-	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/daemon/names"
 )
 
 var (
-	validCheckpointNameChars   = api.RestrictedNameChars
-	validCheckpointNamePattern = api.RestrictedNamePattern
+	validCheckpointNameChars   = names.RestrictedNameChars
+	validCheckpointNamePattern = names.RestrictedNamePattern
 )
 
 // getCheckpointDir verifies checkpoint directory for create,remove, list options and checks if checkpoint already exists
-func getCheckpointDir(checkDir, checkpointID string, ctrName string, ctrID string, ctrCheckpointDir string, create bool) (string, error) {
+func getCheckpointDir(checkDir, checkpointID, ctrName, ctrID, ctrCheckpointDir string, create bool) (string, error) {
 	var checkpointDir string
 	var err2 error
 	if checkDir != "" {
@@ -32,7 +33,10 @@
 		case err == nil && stat.IsDir():
 			err2 = fmt.Errorf("checkpoint with name %s already exists for container %s", checkpointID, ctrName)
 		case err != nil && os.IsNotExist(err):
-			err2 = nil
+			err2 = os.MkdirAll(checkpointAbsDir, 0700)
+			if os.IsExist(err2) {
+				err2 = nil
+			}
 		case err != nil:
 			err2 = err
 		case err == nil:
@@ -48,7 +52,7 @@
 			err2 = fmt.Errorf("%s exists and is not a directory", checkpointAbsDir)
 		}
 	}
-	return checkpointDir, err2
+	return checkpointAbsDir, err2
 }
 
 // CheckpointCreate checkpoints the process running in a container with CRIU
@@ -62,6 +66,10 @@
 		return fmt.Errorf("Container %s not running", name)
 	}
 
+	if container.Config.Tty {
+		return fmt.Errorf("checkpoint not supported on containers with tty")
+	}
+
 	if !validCheckpointNamePattern.MatchString(config.CheckpointID) {
 		return fmt.Errorf("Invalid checkpoint ID (%s), only %s are allowed", config.CheckpointID, validCheckpointNameChars)
 	}
@@ -71,8 +79,9 @@
 		return fmt.Errorf("cannot checkpoint container %s: %s", name, err)
 	}
 
-	err = daemon.containerd.CreateCheckpoint(container.ID, config.CheckpointID, checkpointDir, config.Exit)
+	err = daemon.containerd.CreateCheckpoint(context.Background(), container.ID, checkpointDir, config.Exit)
 	if err != nil {
+		os.RemoveAll(checkpointDir)
 		return fmt.Errorf("Cannot checkpoint container %s: %s", name, err)
 	}
 
diff --git a/daemon/cluster/convert/container.go b/daemon/cluster/convert/container.go
index 795e944..3f3ea34 100644
--- a/daemon/cluster/convert/container.go
+++ b/daemon/cluster/convert/container.go
@@ -34,6 +34,7 @@
 		Hosts:      c.Hosts,
 		Secrets:    secretReferencesFromGRPC(c.Secrets),
 		Configs:    configReferencesFromGRPC(c.Configs),
+		Isolation:  IsolationFromGRPC(c.Isolation),
 	}
 
 	if c.DNSConfig != nil {
@@ -232,6 +233,7 @@
 		Hosts:      c.Hosts,
 		Secrets:    secretReferencesToGRPC(c.Secrets),
 		Configs:    configReferencesToGRPC(c.Configs),
+		Isolation:  isolationToGRPC(c.Isolation),
 	}
 
 	if c.DNSConfig != nil {
@@ -354,3 +356,26 @@
 		StartPeriod: gogotypes.DurationProto(h.StartPeriod),
 	}
 }
+
+// IsolationFromGRPC converts a swarm api container isolation to a moby isolation representation
+func IsolationFromGRPC(i swarmapi.ContainerSpec_Isolation) container.Isolation {
+	switch i {
+	case swarmapi.ContainerIsolationHyperV:
+		return container.IsolationHyperV
+	case swarmapi.ContainerIsolationProcess:
+		return container.IsolationProcess
+	case swarmapi.ContainerIsolationDefault:
+		return container.IsolationDefault
+	}
+	return container.IsolationEmpty
+}
+
+func isolationToGRPC(i container.Isolation) swarmapi.ContainerSpec_Isolation {
+	if i.IsHyperV() {
+		return swarmapi.ContainerIsolationHyperV
+	}
+	if i.IsProcess() {
+		return swarmapi.ContainerIsolationProcess
+	}
+	return swarmapi.ContainerIsolationDefault
+}
diff --git a/daemon/cluster/convert/service_test.go b/daemon/cluster/convert/service_test.go
index 1b65989..a5c7cc4 100644
--- a/daemon/cluster/convert/service_test.go
+++ b/daemon/cluster/convert/service_test.go
@@ -3,10 +3,12 @@
 import (
 	"testing"
 
+	containertypes "github.com/docker/docker/api/types/container"
 	swarmtypes "github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/api/types/swarm/runtime"
 	swarmapi "github.com/docker/swarmkit/api"
 	google_protobuf3 "github.com/gogo/protobuf/types"
+	"github.com/stretchr/testify/require"
 )
 
 func TestServiceConvertFromGRPCRuntimeContainer(t *testing.T) {
@@ -148,3 +150,85 @@
 		t.Fatal(err)
 	}
 }
+
+func TestServiceConvertToGRPCIsolation(t *testing.T) {
+	cases := []struct {
+		name string
+		from containertypes.Isolation
+		to   swarmapi.ContainerSpec_Isolation
+	}{
+		{name: "empty", from: containertypes.IsolationEmpty, to: swarmapi.ContainerIsolationDefault},
+		{name: "default", from: containertypes.IsolationDefault, to: swarmapi.ContainerIsolationDefault},
+		{name: "process", from: containertypes.IsolationProcess, to: swarmapi.ContainerIsolationProcess},
+		{name: "hyperv", from: containertypes.IsolationHyperV, to: swarmapi.ContainerIsolationHyperV},
+		{name: "proCess", from: containertypes.Isolation("proCess"), to: swarmapi.ContainerIsolationProcess},
+		{name: "hypErv", from: containertypes.Isolation("hypErv"), to: swarmapi.ContainerIsolationHyperV},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			s := swarmtypes.ServiceSpec{
+				TaskTemplate: swarmtypes.TaskSpec{
+					ContainerSpec: &swarmtypes.ContainerSpec{
+						Image:     "alpine:latest",
+						Isolation: c.from,
+					},
+				},
+				Mode: swarmtypes.ServiceMode{
+					Global: &swarmtypes.GlobalService{},
+				},
+			}
+			res, err := ServiceSpecToGRPC(s)
+			require.NoError(t, err)
+			v, ok := res.Task.Runtime.(*swarmapi.TaskSpec_Container)
+			if !ok {
+				t.Fatal("expected type swarmapi.TaskSpec_Container")
+			}
+			require.Equal(t, c.to, v.Container.Isolation)
+		})
+	}
+}
+
+func TestServiceConvertFromGRPCIsolation(t *testing.T) {
+	cases := []struct {
+		name string
+		from swarmapi.ContainerSpec_Isolation
+		to   containertypes.Isolation
+	}{
+		{name: "default", to: containertypes.IsolationDefault, from: swarmapi.ContainerIsolationDefault},
+		{name: "process", to: containertypes.IsolationProcess, from: swarmapi.ContainerIsolationProcess},
+		{name: "hyperv", to: containertypes.IsolationHyperV, from: swarmapi.ContainerIsolationHyperV},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			gs := swarmapi.Service{
+				Meta: swarmapi.Meta{
+					Version: swarmapi.Version{
+						Index: 1,
+					},
+					CreatedAt: nil,
+					UpdatedAt: nil,
+				},
+				SpecVersion: &swarmapi.Version{
+					Index: 1,
+				},
+				Spec: swarmapi.ServiceSpec{
+					Task: swarmapi.TaskSpec{
+						Runtime: &swarmapi.TaskSpec_Container{
+							Container: &swarmapi.ContainerSpec{
+								Image:     "alpine:latest",
+								Isolation: c.from,
+							},
+						},
+					},
+				},
+			}
+
+			svc, err := ServiceFromGRPC(gs)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			require.Equal(t, c.to, svc.Spec.TaskTemplate.ContainerSpec.Isolation)
+		})
+	}
+}
diff --git a/daemon/cluster/errors.go b/daemon/cluster/errors.go
index 1698229..0ffe78b 100644
--- a/daemon/cluster/errors.go
+++ b/daemon/cluster/errors.go
@@ -15,6 +15,9 @@
 
 	// errSwarmCertificatesExpired is returned if docker was not started for the whole validity period and they had no chance to renew automatically.
 	errSwarmCertificatesExpired notAvailableError = "Swarm certificates have expired. To replace them, leave the swarm and join again."
+
+	// errSwarmNotManager is returned if the node is not a swarm manager.
+	errSwarmNotManager notAvailableError = "This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager."
 )
 
 type notFoundError struct {
diff --git a/daemon/cluster/executor/backend.go b/daemon/cluster/executor/backend.go
index 2b26979..f6763d1 100644
--- a/daemon/cluster/executor/backend.go
+++ b/daemon/cluster/executor/backend.go
@@ -15,6 +15,7 @@
 	swarmtypes "github.com/docker/docker/api/types/swarm"
 	containerpkg "github.com/docker/docker/container"
 	clustertypes "github.com/docker/docker/daemon/cluster/provider"
+	networkSettings "github.com/docker/docker/daemon/network"
 	"github.com/docker/docker/plugin"
 	"github.com/docker/libnetwork"
 	"github.com/docker/libnetwork/cluster"
@@ -61,4 +62,5 @@
 	LookupImage(name string) (*types.ImageInspect, error)
 	PluginManager() *plugin.Manager
 	PluginGetter() *plugin.Store
+	GetAttachmentStore() *networkSettings.AttachmentStore
 }
diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go
index ded3533..81e740a 100644
--- a/daemon/cluster/executor/container/adapter.go
+++ b/daemon/cluster/executor/container/adapter.go
@@ -20,7 +20,6 @@
 	containerpkg "github.com/docker/docker/container"
 	"github.com/docker/docker/daemon/cluster/convert"
 	executorpkg "github.com/docker/docker/daemon/cluster/executor"
-	"github.com/docker/docker/pkg/system"
 	"github.com/docker/libnetwork"
 	"github.com/docker/swarmkit/agent/exec"
 	"github.com/docker/swarmkit/api"
@@ -41,8 +40,8 @@
 	dependencies exec.DependencyGetter
 }
 
-func newContainerAdapter(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*containerAdapter, error) {
-	ctnr, err := newContainerConfig(task)
+func newContainerAdapter(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*containerAdapter, error) {
+	ctnr, err := newContainerConfig(task, node)
 	if err != nil {
 		return nil, err
 	}
@@ -93,9 +92,6 @@
 		// TODO @jhowardmsft LCOW Support: This will need revisiting as
 		// the stack is built up to include LCOW support for swarm.
 		platform := runtime.GOOS
-		if system.LCOWSupported() {
-			platform = "linux"
-		}
 		err := c.backend.PullImage(ctx, c.container.image(), "", platform, metaHeaders, authConfig, pw)
 		pw.CloseWithError(err)
 	}()
diff --git a/daemon/cluster/executor/container/attachment.go b/daemon/cluster/executor/container/attachment.go
index 54f95a1..405aa2d 100644
--- a/daemon/cluster/executor/container/attachment.go
+++ b/daemon/cluster/executor/container/attachment.go
@@ -20,8 +20,8 @@
 	closed  chan struct{}
 }
 
-func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*networkAttacherController, error) {
-	adapter, err := newContainerAdapter(b, task, dependencies)
+func newNetworkAttacherController(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*networkAttacherController, error) {
+	adapter, err := newContainerAdapter(b, task, node, dependencies)
 	if err != nil {
 		return nil, err
 	}
@@ -40,11 +40,7 @@
 
 func (nc *networkAttacherController) Prepare(ctx context.Context) error {
 	// Make sure all the networks that the task needs are created.
-	if err := nc.adapter.createNetworks(ctx); err != nil {
-		return err
-	}
-
-	return nil
+	return nc.adapter.createNetworks(ctx)
 }
 
 func (nc *networkAttacherController) Start(ctx context.Context) error {
@@ -69,11 +65,7 @@
 func (nc *networkAttacherController) Remove(ctx context.Context) error {
 	// Try removing the network referenced in this task in case this
 	// task is the last one referencing it
-	if err := nc.adapter.removeNetworks(ctx); err != nil {
-		return err
-	}
-
-	return nil
+	return nc.adapter.removeNetworks(ctx)
 }
 
 func (nc *networkAttacherController) Close() error {
diff --git a/daemon/cluster/executor/container/container.go b/daemon/cluster/executor/container/container.go
index 8ea4126..4f41fb3 100644
--- a/daemon/cluster/executor/container/container.go
+++ b/daemon/cluster/executor/container/container.go
@@ -48,12 +48,12 @@
 
 // newContainerConfig returns a validated container config. No methods should
 // return an error if this function returns without error.
-func newContainerConfig(t *api.Task) (*containerConfig, error) {
+func newContainerConfig(t *api.Task, node *api.NodeDescription) (*containerConfig, error) {
 	var c containerConfig
-	return &c, c.setTask(t)
+	return &c, c.setTask(t, node)
 }
 
-func (c *containerConfig) setTask(t *api.Task) error {
+func (c *containerConfig) setTask(t *api.Task, node *api.NodeDescription) error {
 	if t.Spec.GetContainer() == nil && t.Spec.GetAttachment() == nil {
 		return exec.ErrRuntimeUnsupported
 	}
@@ -78,7 +78,7 @@
 	c.task = t
 
 	if t.Spec.GetContainer() != nil {
-		preparedSpec, err := template.ExpandContainerSpec(nil, t)
+		preparedSpec, err := template.ExpandContainerSpec(node, t)
 		if err != nil {
 			return err
 		}
@@ -168,6 +168,10 @@
 	return portBindings
 }
 
+func (c *containerConfig) isolation() enginecontainer.Isolation {
+	return convert.IsolationFromGRPC(c.spec().Isolation)
+}
+
 func (c *containerConfig) exposedPorts() map[nat.Port]struct{} {
 	exposedPorts := make(map[nat.Port]struct{})
 	if c.task.Endpoint == nil {
@@ -350,6 +354,7 @@
 		PortBindings:   c.portBindings(),
 		Mounts:         c.mounts(),
 		ReadonlyRootfs: c.spec().ReadOnly,
+		Isolation:      c.isolation(),
 	}
 
 	if c.spec().DNSConfig != nil {
diff --git a/daemon/cluster/executor/container/container_test.go b/daemon/cluster/executor/container/container_test.go
new file mode 100644
index 0000000..a583d14
--- /dev/null
+++ b/daemon/cluster/executor/container/container_test.go
@@ -0,0 +1,37 @@
+package container
+
+import (
+	"testing"
+
+	container "github.com/docker/docker/api/types/container"
+	swarmapi "github.com/docker/swarmkit/api"
+	"github.com/stretchr/testify/require"
+)
+
+func TestIsolationConversion(t *testing.T) {
+	cases := []struct {
+		name string
+		from swarmapi.ContainerSpec_Isolation
+		to   container.Isolation
+	}{
+		{name: "default", from: swarmapi.ContainerIsolationDefault, to: container.IsolationDefault},
+		{name: "process", from: swarmapi.ContainerIsolationProcess, to: container.IsolationProcess},
+		{name: "hyperv", from: swarmapi.ContainerIsolationHyperV, to: container.IsolationHyperV},
+	}
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			task := swarmapi.Task{
+				Spec: swarmapi.TaskSpec{
+					Runtime: &swarmapi.TaskSpec_Container{
+						Container: &swarmapi.ContainerSpec{
+							Image:     "alpine:latest",
+							Isolation: c.from,
+						},
+					},
+				},
+			}
+			config := containerConfig{task: &task}
+			require.Equal(t, c.to, config.hostConfig().Isolation)
+		})
+	}
+}
diff --git a/daemon/cluster/executor/container/controller.go b/daemon/cluster/executor/container/controller.go
index 3ba4302..dda1259 100644
--- a/daemon/cluster/executor/container/controller.go
+++ b/daemon/cluster/executor/container/controller.go
@@ -40,8 +40,8 @@
 var _ exec.Controller = &controller{}
 
 // NewController returns a docker exec runner for the provided task.
-func newController(b executorpkg.Backend, task *api.Task, dependencies exec.DependencyGetter) (*controller, error) {
-	adapter, err := newContainerAdapter(b, task, dependencies)
+func newController(b executorpkg.Backend, task *api.Task, node *api.NodeDescription, dependencies exec.DependencyGetter) (*controller, error) {
+	adapter, err := newContainerAdapter(b, task, node, dependencies)
 	if err != nil {
 		return nil, err
 	}
diff --git a/daemon/cluster/executor/container/executor.go b/daemon/cluster/executor/container/executor.go
index fb339f1..14e41fa 100644
--- a/daemon/cluster/executor/container/executor.go
+++ b/daemon/cluster/executor/container/executor.go
@@ -4,6 +4,7 @@
 	"fmt"
 	"sort"
 	"strings"
+	"sync"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
@@ -26,6 +27,8 @@
 	backend       executorpkg.Backend
 	pluginBackend plugin.Backend
 	dependencies  exec.DependencyManager
+	mutex         sync.Mutex // This mutex protects the following node field
+	node          *api.NodeDescription
 }
 
 // NewExecutor returns an executor from the docker client.
@@ -124,27 +127,41 @@
 		},
 	}
 
+	// Save the node information in the executor field
+	e.mutex.Lock()
+	e.node = description
+	e.mutex.Unlock()
+
 	return description, nil
 }
 
 func (e *executor) Configure(ctx context.Context, node *api.Node) error {
-	na := node.Attachment
-	if na == nil {
+	var ingressNA *api.NetworkAttachment
+	attachments := make(map[string]string)
+
+	for _, na := range node.Attachments {
+		if na.Network.Spec.Ingress {
+			ingressNA = na
+		}
+		attachments[na.Network.ID] = na.Addresses[0]
+	}
+
+	if ingressNA == nil {
 		e.backend.ReleaseIngress()
-		return nil
+		return e.backend.GetAttachmentStore().ResetAttachments(attachments)
 	}
 
 	options := types.NetworkCreate{
-		Driver: na.Network.DriverState.Name,
+		Driver: ingressNA.Network.DriverState.Name,
 		IPAM: &network.IPAM{
-			Driver: na.Network.IPAM.Driver.Name,
+			Driver: ingressNA.Network.IPAM.Driver.Name,
 		},
-		Options:        na.Network.DriverState.Options,
+		Options:        ingressNA.Network.DriverState.Options,
 		Ingress:        true,
 		CheckDuplicate: true,
 	}
 
-	for _, ic := range na.Network.IPAM.Configs {
+	for _, ic := range ingressNA.Network.IPAM.Configs {
 		c := network.IPAMConfig{
 			Subnet:  ic.Subnet,
 			IPRange: ic.Range,
@@ -154,22 +171,30 @@
 	}
 
 	_, err := e.backend.SetupIngress(clustertypes.NetworkCreateRequest{
-		ID: na.Network.ID,
+		ID: ingressNA.Network.ID,
 		NetworkCreateRequest: types.NetworkCreateRequest{
-			Name:          na.Network.Spec.Annotations.Name,
+			Name:          ingressNA.Network.Spec.Annotations.Name,
 			NetworkCreate: options,
 		},
-	}, na.Addresses[0])
+	}, ingressNA.Addresses[0])
+	if err != nil {
+		return err
+	}
 
-	return err
+	return e.backend.GetAttachmentStore().ResetAttachments(attachments)
 }
 
 // Controller returns a docker container runner.
 func (e *executor) Controller(t *api.Task) (exec.Controller, error) {
 	dependencyGetter := agent.Restrict(e.dependencies, t)
 
+	// Get the node description from the executor field
+	e.mutex.Lock()
+	nodeDescription := e.node
+	e.mutex.Unlock()
+
 	if t.Spec.GetAttachment() != nil {
-		return newNetworkAttacherController(e.backend, t, dependencyGetter)
+		return newNetworkAttacherController(e.backend, t, nodeDescription, dependencyGetter)
 	}
 
 	var ctlr exec.Controller
@@ -198,7 +223,7 @@
 			return ctlr, fmt.Errorf("unsupported runtime type: %q", runtimeKind)
 		}
 	case *api.TaskSpec_Container:
-		c, err := newController(e.backend, t, dependencyGetter)
+		c, err := newController(e.backend, t, nodeDescription, dependencyGetter)
 		if err != nil {
 			return ctlr, err
 		}
diff --git a/daemon/cluster/executor/container/health_test.go b/daemon/cluster/executor/container/health_test.go
index b6f1885..450865e 100644
--- a/daemon/cluster/executor/container/health_test.go
+++ b/daemon/cluster/executor/container/health_test.go
@@ -52,7 +52,7 @@
 		EventsService: e,
 	}
 
-	controller, err := newController(daemon, task, nil)
+	controller, err := newController(daemon, task, nil, nil)
 	if err != nil {
 		t.Fatalf("create controller fail %v", err)
 	}
diff --git a/daemon/cluster/executor/container/validate_test.go b/daemon/cluster/executor/container/validate_test.go
index 9d98e2c..43e224a 100644
--- a/daemon/cluster/executor/container/validate_test.go
+++ b/daemon/cluster/executor/container/validate_test.go
@@ -26,7 +26,8 @@
 				},
 			},
 		},
-	}, nil)
+	}, nil,
+		nil)
 }
 
 func TestControllerValidateMountBind(t *testing.T) {
diff --git a/daemon/cluster/listen_addr_others.go b/daemon/cluster/listen_addr_others.go
index 4e845f5..ebf7dae 100644
--- a/daemon/cluster/listen_addr_others.go
+++ b/daemon/cluster/listen_addr_others.go
@@ -1,4 +1,4 @@
-// +build !linux,!solaris
+// +build !linux
 
 package cluster
 
diff --git a/daemon/cluster/services.go b/daemon/cluster/services.go
index 0edb407..f3fba8d 100644
--- a/daemon/cluster/services.go
+++ b/daemon/cluster/services.go
@@ -74,7 +74,7 @@
 	services := make([]types.Service, 0, len(r.Services))
 
 	for _, service := range r.Services {
-		if options.Filters.Include("mode") {
+		if options.Filters.Contains("mode") {
 			var mode string
 			switch service.Spec.GetMode().(type) {
 			case *swarmapi.ServiceSpec_Global:
diff --git a/daemon/cluster/swarm.go b/daemon/cluster/swarm.go
index 1fa6292..6122369 100644
--- a/daemon/cluster/swarm.go
+++ b/daemon/cluster/swarm.go
@@ -26,9 +26,13 @@
 	defer c.controlMutex.Unlock()
 	if c.nr != nil {
 		if req.ForceNewCluster {
+
 			// Take c.mu temporarily to wait for presently running
 			// API handlers to finish before shutting down the node.
 			c.mu.Lock()
+			if !c.nr.nodeState.IsManager() {
+				return "", errSwarmNotManager
+			}
 			c.mu.Unlock()
 
 			if err := c.nr.Stop(); err != nil {
@@ -194,9 +198,9 @@
 
 // Inspect retrieves the configuration properties of a managed swarm cluster.
 func (c *Cluster) Inspect() (types.Swarm, error) {
-	var swarm *swarmapi.Cluster
+	var swarm types.Swarm
 	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
-		s, err := getSwarm(ctx, state.controlClient)
+		s, err := c.inspect(ctx, state)
 		if err != nil {
 			return err
 		}
@@ -205,7 +209,15 @@
 	}); err != nil {
 		return types.Swarm{}, err
 	}
-	return convert.SwarmFromGRPC(*swarm), nil
+	return swarm, nil
+}
+
+func (c *Cluster) inspect(ctx context.Context, state nodeState) (types.Swarm, error) {
+	s, err := getSwarm(ctx, state.controlClient)
+	if err != nil {
+		return types.Swarm{}, err
+	}
+	return convert.SwarmFromGRPC(*s), nil
 }
 
 // Update updates configuration of a managed swarm cluster.
@@ -409,7 +421,7 @@
 
 	if state.IsActiveManager() {
 		info.ControlAvailable = true
-		swarm, err := c.Inspect()
+		swarm, err := c.inspect(ctx, state)
 		if err != nil {
 			info.Error = err.Error()
 		}
diff --git a/daemon/cluster/tasks.go b/daemon/cluster/tasks.go
index 26706a2..f52301f 100644
--- a/daemon/cluster/tasks.go
+++ b/daemon/cluster/tasks.go
@@ -15,7 +15,7 @@
 
 	if err := c.lockedManagerAction(func(ctx context.Context, state nodeState) error {
 		filterTransform := func(filter filters.Args) error {
-			if filter.Include("service") {
+			if filter.Contains("service") {
 				serviceFilters := filter.Get("service")
 				for _, serviceFilter := range serviceFilters {
 					service, err := getService(ctx, state.controlClient, serviceFilter, false)
@@ -26,7 +26,7 @@
 					filter.Add("service", service.ID)
 				}
 			}
-			if filter.Include("node") {
+			if filter.Contains("node") {
 				nodeFilters := filter.Get("node")
 				for _, nodeFilter := range nodeFilters {
 					node, err := getNode(ctx, state.controlClient, nodeFilter)
@@ -37,7 +37,7 @@
 					filter.Add("node", node.ID)
 				}
 			}
-			if !filter.Include("runtime") {
+			if !filter.Contains("runtime") {
 				// default to only showing container tasks
 				filter.Add("runtime", "container")
 				filter.Add("runtime", "")
diff --git a/daemon/commit.go b/daemon/commit.go
index 084f488..0053132 100644
--- a/daemon/commit.go
+++ b/daemon/commit.go
@@ -2,6 +2,7 @@
 
 import (
 	"encoding/json"
+	"fmt"
 	"io"
 	"runtime"
 	"strings"
@@ -128,11 +129,21 @@
 		return "", err
 	}
 
-	// It is not possible to commit a running container on Windows and on Solaris.
-	if (runtime.GOOS == "windows" || runtime.GOOS == "solaris") && container.IsRunning() {
+	// It is not possible to commit a running container on Windows
+	if (runtime.GOOS == "windows") && container.IsRunning() {
 		return "", errors.Errorf("%+v does not support commit of a running container", runtime.GOOS)
 	}
 
+	if container.IsDead() {
+		err := fmt.Errorf("You cannot commit container %s which is Dead", container.ID)
+		return "", stateConflictError{err}
+	}
+
+	if container.IsRemovalInProgress() {
+		err := fmt.Errorf("You cannot commit container %s which is being removed", container.ID)
+		return "", stateConflictError{err}
+	}
+
 	if c.Pause && !container.IsPaused() {
 		daemon.containerPause(container)
 		defer daemon.containerUnpause(container)
@@ -164,17 +175,17 @@
 		parent = new(image.Image)
 		parent.RootFS = image.NewRootFS()
 	} else {
-		parent, err = daemon.stores[container.Platform].imageStore.Get(container.ImageID)
+		parent, err = daemon.stores[container.OS].imageStore.Get(container.ImageID)
 		if err != nil {
 			return "", err
 		}
 	}
 
-	l, err := daemon.stores[container.Platform].layerStore.Register(rwTar, parent.RootFS.ChainID(), layer.Platform(container.Platform))
+	l, err := daemon.stores[container.OS].layerStore.Register(rwTar, parent.RootFS.ChainID(), layer.OS(container.OS))
 	if err != nil {
 		return "", err
 	}
-	defer layer.ReleaseAndLog(daemon.stores[container.Platform].layerStore, l)
+	defer layer.ReleaseAndLog(daemon.stores[container.OS].layerStore, l)
 
 	containerConfig := c.ContainerConfig
 	if containerConfig == nil {
@@ -188,18 +199,18 @@
 		Config:          newConfig,
 		DiffID:          l.DiffID(),
 	}
-	config, err := json.Marshal(image.NewChildImage(parent, cc, container.Platform))
+	config, err := json.Marshal(image.NewChildImage(parent, cc, container.OS))
 	if err != nil {
 		return "", err
 	}
 
-	id, err := daemon.stores[container.Platform].imageStore.Create(config)
+	id, err := daemon.stores[container.OS].imageStore.Create(config)
 	if err != nil {
 		return "", err
 	}
 
 	if container.ImageID != "" {
-		if err := daemon.stores[container.Platform].imageStore.SetParent(id, container.ImageID); err != nil {
+		if err := daemon.stores[container.OS].imageStore.SetParent(id, container.ImageID); err != nil {
 			return "", err
 		}
 	}
@@ -218,7 +229,7 @@
 				return "", err
 			}
 		}
-		if err := daemon.TagImageWithReference(id, container.Platform, newTag); err != nil {
+		if err := daemon.TagImageWithReference(id, container.OS, newTag); err != nil {
 			return "", err
 		}
 		imageRef = reference.FamiliarString(newTag)
@@ -234,19 +245,36 @@
 	return id.String(), nil
 }
 
-func (daemon *Daemon) exportContainerRw(container *container.Container) (io.ReadCloser, error) {
-	if err := daemon.Mount(container); err != nil {
+func (daemon *Daemon) exportContainerRw(container *container.Container) (arch io.ReadCloser, err error) {
+	rwlayer, err := daemon.stores[container.OS].layerStore.GetRWLayer(container.ID)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err != nil {
+			daemon.stores[container.OS].layerStore.ReleaseRWLayer(rwlayer)
+		}
+	}()
+
+	// TODO: this mount call is not necessary as we assume that TarStream() should
+	// mount the layer if needed. But the Diff() function for windows requests that
+	// the layer should be mounted when calling it. So we reserve this mount call
+	// until windows driver can implement Diff() interface correctly.
+	_, err = rwlayer.Mount(container.GetMountLabel())
+	if err != nil {
 		return nil, err
 	}
 
-	archive, err := container.RWLayer.TarStream()
+	archive, err := rwlayer.TarStream()
 	if err != nil {
-		daemon.Unmount(container) // logging is already handled in the `Unmount` function
+		rwlayer.Unmount()
 		return nil, err
 	}
 	return ioutils.NewReadCloserWrapper(archive, func() error {
 			archive.Close()
-			return container.RWLayer.Unmount()
+			err = rwlayer.Unmount()
+			daemon.stores[container.OS].layerStore.ReleaseRWLayer(rwlayer)
+			return err
 		}),
 		nil
 }
diff --git a/daemon/config/config.go b/daemon/config/config.go
index d01d4a4..f7a0df5 100644
--- a/daemon/config/config.go
+++ b/daemon/config/config.go
@@ -7,6 +7,7 @@
 	"fmt"
 	"io"
 	"io/ioutil"
+	"os"
 	"reflect"
 	"runtime"
 	"strings"
@@ -101,9 +102,9 @@
 	RawLogs              bool                      `json:"raw-logs,omitempty"`
 	RootDeprecated       string                    `json:"graph,omitempty"`
 	Root                 string                    `json:"data-root,omitempty"`
+	ExecRoot             string                    `json:"exec-root,omitempty"`
 	SocketGroup          string                    `json:"group,omitempty"`
 	CorsHeaders          string                    `json:"api-cors-header,omitempty"`
-	EnableCors           bool                      `json:"api-enable-cors,omitempty"`
 
 	// TrustKeyPath is used to generate the daemon ID and for signing schema 1 manifests
 	// when pushing to a registry which does not support schema 2. This field is marked as
@@ -170,9 +171,14 @@
 	Experimental bool `json:"experimental"` // Experimental indicates whether experimental features should be exposed or not
 
 	// Exposed node Generic Resources
-	NodeGenericResources string `json:"node-generic-resources,omitempty"`
+	// e.g: ["orange=red", "orange=green", "orange=blue", "apple=3"]
+	NodeGenericResources []string `json:"node-generic-resources,omitempty"`
 	// NetworkControlPlaneMTU allows to specify the control plane MTU, this will allow to optimize the network use in some components
 	NetworkControlPlaneMTU int `json:"network-control-plane-mtu,omitempty"`
+
+	// ContainerdAddr is the address used to connect to containerd if we're
+	// not starting it ourselves
+	ContainerdAddr string `json:"containerd,omitempty"`
 }
 
 // IsValueSet returns true if a configuration value
@@ -199,9 +205,6 @@
 
 // ParseClusterAdvertiseSettings parses the specified advertise settings
 func ParseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) {
-	if runtime.GOOS == "solaris" && (clusterAdvertise != "" || clusterStore != "") {
-		return "", errors.New("Cluster Advertise Settings not supported on Solaris")
-	}
 	if clusterAdvertise == "" {
 		return "", daemondiscovery.ErrDiscoveryDisabled
 	}
@@ -245,7 +248,10 @@
 	logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile)
 	newConfig, err := getConflictFreeConfiguration(configFile, flags)
 	if err != nil {
-		return err
+		if flags.Changed("config-file") || !os.IsNotExist(err) {
+			return fmt.Errorf("unable to configure the Docker daemon with file %s: %v", configFile, err)
+		}
+		newConfig = New()
 	}
 
 	if err := Validate(newConfig); err != nil {
diff --git a/daemon/config/config_common_unix.go b/daemon/config/config_common_unix.go
index d11cceb..d2fa2e0 100644
--- a/daemon/config/config_common_unix.go
+++ b/daemon/config/config_common_unix.go
@@ -1,4 +1,4 @@
-// +build solaris linux freebsd
+// +build linux freebsd
 
 package config
 
@@ -11,8 +11,6 @@
 // CommonUnixConfig defines configuration of a docker daemon that is
 // common across Unix platforms.
 type CommonUnixConfig struct {
-	ExecRoot          string                   `json:"exec-root,omitempty"`
-	ContainerdAddr    string                   `json:"containerd,omitempty"`
 	Runtimes          map[string]types.Runtime `json:"runtimes,omitempty"`
 	DefaultRuntime    string                   `json:"default-runtime,omitempty"`
 	DefaultInitBinary string                   `json:"default-init,omitempty"`
diff --git a/daemon/config/config_test.go b/daemon/config/config_test.go
index 43246d9..bdd046b 100644
--- a/daemon/config/config_test.go
+++ b/daemon/config/config_test.go
@@ -3,7 +3,6 @@
 import (
 	"io/ioutil"
 	"os"
-	"runtime"
 	"strings"
 	"testing"
 
@@ -38,9 +37,6 @@
 }
 
 func TestParseClusterAdvertiseSettings(t *testing.T) {
-	if runtime.GOOS == "solaris" {
-		t.Skip("ClusterSettings not supported on Solaris\n")
-	}
 	_, err := ParseClusterAdvertiseSettings("something", "")
 	if err != discovery.ErrDiscoveryDisabled {
 		t.Fatalf("expected discovery disabled error, got %v\n", err)
@@ -263,6 +259,20 @@
 				},
 			},
 		},
+		{
+			config: &Config{
+				CommonConfig: CommonConfig{
+					NodeGenericResources: []string{"foo"},
+				},
+			},
+		},
+		{
+			config: &Config{
+				CommonConfig: CommonConfig{
+					NodeGenericResources: []string{"foo=bar", "foo=1"},
+				},
+			},
+		},
 	}
 	for _, tc := range testCases {
 		err := Validate(tc.config)
@@ -320,6 +330,20 @@
 				},
 			},
 		},
+		{
+			config: &Config{
+				CommonConfig: CommonConfig{
+					NodeGenericResources: []string{"foo=bar", "foo=baz"},
+				},
+			},
+		},
+		{
+			config: &Config{
+				CommonConfig: CommonConfig{
+					NodeGenericResources: []string{"foo=1"},
+				},
+			},
+		},
 	}
 	for _, tc := range testCases {
 		err := Validate(tc.config)
@@ -389,3 +413,49 @@
 		},
 	}
 }
+
+// TestReloadSetConfigFileNotExist tests that when `--config-file` is set
+// and it doesn't exist the `Reload` function returns an error.
+func TestReloadSetConfigFileNotExist(t *testing.T) {
+	configFile := "/tmp/blabla/not/exists/config.json"
+	flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
+	flags.String("config-file", "", "")
+	flags.Set("config-file", configFile)
+
+	err := Reload(configFile, flags, func(c *Config) {})
+	assert.Error(t, err)
+	testutil.ErrorContains(t, err, "unable to configure the Docker daemon with file")
+}
+
+// TestReloadDefaultConfigNotExist tests that if the default configuration file
+// doesn't exist the daemon still will be reloaded.
+func TestReloadDefaultConfigNotExist(t *testing.T) {
+	reloaded := false
+	configFile := "/etc/docker/daemon.json"
+	flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
+	flags.String("config-file", configFile, "")
+	err := Reload(configFile, flags, func(c *Config) {
+		reloaded = true
+	})
+	assert.Nil(t, err)
+	assert.True(t, reloaded)
+}
+
+// TestReloadBadDefaultConfig tests that when `--config-file` is not set
+// and the default configuration file exists but is invalid, an error is returned.
+func TestReloadBadDefaultConfig(t *testing.T) {
+	f, err := ioutil.TempFile("", "docker-config-")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	configFile := f.Name()
+	f.Write([]byte(`{wrong: "configuration"}`))
+	f.Close()
+
+	flags := pflag.NewFlagSet("test", pflag.ContinueOnError)
+	flags.String("config-file", configFile, "")
+	err = Reload(configFile, flags, func(c *Config) {})
+	assert.Error(t, err)
+	testutil.ErrorContains(t, err, "unable to configure the Docker daemon with file")
+}
diff --git a/daemon/config/config_unix_test.go b/daemon/config/config_unix_test.go
index 5987e77..fedf757 100644
--- a/daemon/config/config_unix_test.go
+++ b/daemon/config/config_unix_test.go
@@ -6,7 +6,7 @@
 	"testing"
 
 	"github.com/docker/docker/opts"
-	"github.com/docker/go-units"
+	units "github.com/docker/go-units"
 	"github.com/gotestyourself/gotestyourself/fs"
 	"github.com/spf13/pflag"
 	"github.com/stretchr/testify/assert"
@@ -14,7 +14,7 @@
 )
 
 func TestGetConflictFreeConfiguration(t *testing.T) {
-	configFileData := string([]byte(`
+	configFileData := `
 		{
 			"debug": true,
 			"default-ulimits": {
@@ -27,7 +27,7 @@
 			"log-opts": {
 				"tag": "test_tag"
 			}
-		}`))
+		}`
 
 	file := fs.NewFile(t, "docker-config", fs.WithContent(configFileData))
 	defer file.Remove()
@@ -55,7 +55,7 @@
 }
 
 func TestDaemonConfigurationMerge(t *testing.T) {
-	configFileData := string([]byte(`
+	configFileData := `
 		{
 			"debug": true,
 			"default-ulimits": {
@@ -68,7 +68,7 @@
 			"log-opts": {
 				"tag": "test_tag"
 			}
-		}`))
+		}`
 
 	file := fs.NewFile(t, "docker-config", fs.WithContent(configFileData))
 	defer file.Remove()
@@ -115,10 +115,7 @@
 }
 
 func TestDaemonConfigurationMergeShmSize(t *testing.T) {
-	data := string([]byte(`
-		{
-			"default-shm-size": "1g"
-		}`))
+	data := `{"default-shm-size": "1g"}`
 
 	file := fs.NewFile(t, "docker-config", fs.WithContent(data))
 	defer file.Remove()
@@ -133,7 +130,5 @@
 	require.NoError(t, err)
 
 	expectedValue := 1 * 1024 * 1024 * 1024
-	if cc.ShmSize.Value() != int64(expectedValue) {
-		t.Fatalf("expected default shm size %d, got %d", expectedValue, cc.ShmSize.Value())
-	}
+	assert.Equal(t, int64(expectedValue), cc.ShmSize.Value())
 }
diff --git a/daemon/config/opts.go b/daemon/config/opts.go
index 00f32e4..cdf2649 100644
--- a/daemon/config/opts.go
+++ b/daemon/config/opts.go
@@ -7,8 +7,8 @@
 )
 
 // ParseGenericResources parses and validates the specified string as a list of GenericResource
-func ParseGenericResources(value string) ([]swarm.GenericResource, error) {
-	if value == "" {
+func ParseGenericResources(value []string) ([]swarm.GenericResource, error) {
+	if len(value) == 0 {
 		return nil, nil
 	}
 
diff --git a/daemon/container.go b/daemon/container.go
index 79b9d41..7338514 100644
--- a/daemon/container.go
+++ b/daemon/container.go
@@ -123,7 +123,7 @@
 	return c.CheckpointTo(daemon.containersReplica)
 }
 
-func (daemon *Daemon) newContainer(name string, platform string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) {
+func (daemon *Daemon) newContainer(name string, operatingSystem string, config *containertypes.Config, hostConfig *containertypes.HostConfig, imgID image.ID, managed bool) (*container.Container, error) {
 	var (
 		id             string
 		err            error
@@ -156,8 +156,8 @@
 	base.ImageID = imgID
 	base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName}
 	base.Name = name
-	base.Driver = daemon.GraphDriverName(platform)
-	base.Platform = platform
+	base.Driver = daemon.GraphDriverName(operatingSystem)
+	base.OS = operatingSystem
 	return base, err
 }
 
diff --git a/daemon/container_operations_solaris.go b/daemon/container_operations_solaris.go
deleted file mode 100644
index c5728d0..0000000
--- a/daemon/container_operations_solaris.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// +build solaris
-
-package daemon
-
-import (
-	"github.com/docker/docker/container"
-	"github.com/docker/docker/runconfig"
-	"github.com/docker/libnetwork"
-)
-
-func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) {
-	return nil, nil
-}
-
-func (daemon *Daemon) setupIpcDirs(container *container.Container) error {
-	return nil
-}
-
-func killProcessDirectly(container *container.Container) error {
-	return nil
-}
-
-func detachMounted(path string) error {
-	return nil
-}
-
-func isLinkable(child *container.Container) bool {
-	// A container is linkable only if it belongs to the default network
-	_, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()]
-	return ok
-}
-
-func enableIPOnPredefinedNetwork() bool {
-	return false
-}
-
-func (daemon *Daemon) isNetworkHotPluggable() bool {
-	return false
-}
-
-func setupPathsAndSandboxOptions(container *container.Container, sboxOptions *[]libnetwork.SandboxOption) error {
-	return nil
-}
-
-func (daemon *Daemon) initializeNetworkingPaths(container *container.Container, nc *container.Container) error {
-	return nil
-}
diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go
index 84b7eb3..09b9b3f 100644
--- a/daemon/container_operations_unix.go
+++ b/daemon/container_operations_unix.go
@@ -84,7 +84,7 @@
 	containerID := container.HostConfig.PidMode.Container()
 	container, err := daemon.GetContainer(containerID)
 	if err != nil {
-		return nil, errors.Wrapf(err, "cannot join PID of a non running container: %s", container.ID)
+		return nil, errors.Wrapf(err, "cannot join PID of a non running container: %s", containerID)
 	}
 	return container, daemon.checkContainer(container, containerIsRunning, containerIsNotRestarting)
 }
@@ -307,6 +307,8 @@
 		if err := os.Chown(fPath, rootIDs.UID+uid, rootIDs.GID+gid); err != nil {
 			return errors.Wrap(err, "error setting ownership for config")
 		}
+
+		label.Relabel(fPath, c.MountLabel, false)
 	}
 
 	return nil
diff --git a/daemon/create.go b/daemon/create.go
index c722405..e4d17cc 100644
--- a/daemon/create.go
+++ b/daemon/create.go
@@ -39,17 +39,21 @@
 		return containertypes.ContainerCreateCreatedBody{}, validationError{errors.New("Config cannot be empty in order to create a container")}
 	}
 
-	// TODO: @jhowardmsft LCOW support - at a later point, can remove the hard-coding
-	// to force the platform to be linux.
-	// Default the platform if not supplied
-	if params.Platform == "" {
-		params.Platform = runtime.GOOS
-	}
-	if system.LCOWSupported() {
-		params.Platform = "linux"
+	os := runtime.GOOS
+	if params.Config.Image != "" {
+		img, err := daemon.GetImage(params.Config.Image)
+		if err == nil {
+			os = img.OS
+		}
+	} else {
+		// This means scratch. On Windows, we can safely assume that this is a linux
+		// container. On other platforms, it's the host OS (which it already is)
+		if runtime.GOOS == "windows" && system.LCOWSupported() {
+			os = "linux"
+		}
 	}
 
-	warnings, err := daemon.verifyContainerSettings(params.Platform, params.HostConfig, params.Config, false)
+	warnings, err := daemon.verifyContainerSettings(os, params.HostConfig, params.Config, false)
 	if err != nil {
 		return containertypes.ContainerCreateCreatedBody{Warnings: warnings}, validationError{err}
 	}
@@ -85,29 +89,28 @@
 		err       error
 	)
 
+	os := runtime.GOOS
 	if params.Config.Image != "" {
 		img, err = daemon.GetImage(params.Config.Image)
 		if err != nil {
 			return nil, err
 		}
-
-		if runtime.GOOS == "solaris" && img.OS != "solaris " {
-			return nil, errors.New("platform on which parent image was created is not Solaris")
+		if img.OS != "" {
+			os = img.OS
+		} else {
+			// default to the host OS except on Windows with LCOW
+			if runtime.GOOS == "windows" && system.LCOWSupported() {
+				os = "linux"
+			}
 		}
 		imgID = img.ID()
 
 		if runtime.GOOS == "windows" && img.OS == "linux" && !system.LCOWSupported() {
-			return nil, errors.New("platform on which parent image was created is not Windows")
+			return nil, errors.New("operating system on which parent image was created is not Windows")
 		}
-	}
-
-	// Make sure the platform requested matches the image
-	if img != nil {
-		if params.Platform != img.Platform() {
-			// Ignore this in LCOW mode. @jhowardmsft TODO - This will need revisiting later.
-			if !system.LCOWSupported() {
-				return nil, fmt.Errorf("cannot create a %s container from a %s image", params.Platform, img.Platform())
-			}
+	} else {
+		if runtime.GOOS == "windows" {
+			os = "linux" // 'scratch' case.
 		}
 	}
 
@@ -119,7 +122,7 @@
 		return nil, validationError{err}
 	}
 
-	if container, err = daemon.newContainer(params.Name, params.Platform, params.Config, params.HostConfig, imgID, managed); err != nil {
+	if container, err = daemon.newContainer(params.Name, os, params.Config, params.HostConfig, imgID, managed); err != nil {
 		return nil, err
 	}
 	defer func() {
@@ -170,7 +173,7 @@
 		return nil, err
 	}
 
-	if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil {
+	if err := daemon.createContainerOSSpecificSettings(container, params.Config, params.HostConfig); err != nil {
 		return nil, err
 	}
 
@@ -253,7 +256,7 @@
 func (daemon *Daemon) setRWLayer(container *container.Container) error {
 	var layerID layer.ChainID
 	if container.ImageID != "" {
-		img, err := daemon.stores[container.Platform].imageStore.Get(container.ImageID)
+		img, err := daemon.stores[container.OS].imageStore.Get(container.ImageID)
 		if err != nil {
 			return err
 		}
@@ -266,7 +269,7 @@
 		StorageOpt: container.HostConfig.StorageOpt,
 	}
 
-	rwLayer, err := daemon.stores[container.Platform].layerStore.CreateRWLayer(container.ID, layerID, rwLayerOpts)
+	rwLayer, err := daemon.stores[container.OS].layerStore.CreateRWLayer(container.ID, layerID, rwLayerOpts)
 	if err != nil {
 		return err
 	}
diff --git a/daemon/create_unix.go b/daemon/create_unix.go
index 1940e9c..e08e4ea 100644
--- a/daemon/create_unix.go
+++ b/daemon/create_unix.go
@@ -15,8 +15,8 @@
 	"github.com/sirupsen/logrus"
 )
 
-// createContainerPlatformSpecificSettings performs platform specific container create functionality
-func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
+// createContainerOSSpecificSettings performs host-OS specific container create functionality
+func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
 	if err := daemon.Mount(container); err != nil {
 		return err
 	}
diff --git a/daemon/create_windows.go b/daemon/create_windows.go
index c63bfff..c7f075f 100644
--- a/daemon/create_windows.go
+++ b/daemon/create_windows.go
@@ -10,10 +10,10 @@
 	"github.com/docker/docker/volume"
 )
 
-// createContainerPlatformSpecificSettings performs platform specific container create functionality
-func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
+// createContainerOSSpecificSettings performs host-OS specific container create functionality
+func (daemon *Daemon) createContainerOSSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
 
-	if container.Platform == runtime.GOOS {
+	if container.OS == runtime.GOOS {
 		// Make sure the host config has the default daemon isolation if not specified by caller.
 		if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) {
 			hostConfig.Isolation = daemon.defaultIsolation
@@ -26,10 +26,10 @@
 		}
 		hostConfig.Isolation = "hyperv"
 	}
-
+	parser := volume.NewParser(container.OS)
 	for spec := range config.Volumes {
 
-		mp, err := volume.ParseMountRaw(spec, hostConfig.VolumeDriver)
+		mp, err := parser.ParseMountRaw(spec, hostConfig.VolumeDriver)
 		if err != nil {
 			return fmt.Errorf("Unrecognised volume spec: %v", err)
 		}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index a11a1f8..96b39e8 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -18,8 +18,7 @@
 	"sync"
 	"time"
 
-	containerd "github.com/containerd/containerd/api/grpc/types"
-	"github.com/docker/docker/api"
+	"github.com/docker/docker/api/errdefs"
 	"github.com/docker/docker/api/types"
 	containertypes "github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/swarm"
@@ -29,6 +28,7 @@
 	"github.com/docker/docker/daemon/events"
 	"github.com/docker/docker/daemon/exec"
 	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/network"
 	"github.com/sirupsen/logrus"
 	// register graph drivers
 	_ "github.com/docker/docker/daemon/graphdriver/register"
@@ -41,12 +41,14 @@
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/migrate/v1"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/sysinfo"
 	"github.com/docker/docker/pkg/system"
 	"github.com/docker/docker/pkg/truncindex"
 	"github.com/docker/docker/plugin"
+	pluginexec "github.com/docker/docker/plugin/executor/containerd"
 	refstore "github.com/docker/docker/reference"
 	"github.com/docker/docker/registry"
 	"github.com/docker/docker/runconfig"
@@ -60,11 +62,10 @@
 	"github.com/pkg/errors"
 )
 
-var (
-	// DefaultRuntimeBinary is the default runtime to be used by
-	// containerd if none is specified
-	DefaultRuntimeBinary = "docker-runc"
+// MainNamespace is the name of the namespace used for users' containers
+const MainNamespace = "moby"
 
+var (
 	errSystemNotSupported = errors.New("the Docker daemon is not supported on this platform")
 )
 
@@ -122,6 +123,8 @@
 	pruneRunning     int32
 	hosts            map[string]bool // hosts stores the addresses the daemon is listening on
 	startupDone      chan struct{}
+
+	attachmentStore network.AttachmentStore
 }
 
 // StoreHosts stores the addresses the daemon is listening on
@@ -136,10 +139,7 @@
 
 // HasExperimental returns whether the experimental features of the daemon are enabled or not
 func (daemon *Daemon) HasExperimental() bool {
-	if daemon.configStore != nil && daemon.configStore.Experimental {
-		return true
-	}
-	return false
+	return daemon.configStore != nil && daemon.configStore.Experimental
 }
 
 func (daemon *Daemon) restore() error {
@@ -161,15 +161,15 @@
 		}
 
 		// Ignore the container if it does not support the current driver being used by the graph
-		currentDriverForContainerPlatform := daemon.stores[container.Platform].graphDriver
-		if (container.Driver == "" && currentDriverForContainerPlatform == "aufs") || container.Driver == currentDriverForContainerPlatform {
-			rwlayer, err := daemon.stores[container.Platform].layerStore.GetRWLayer(container.ID)
+		currentDriverForContainerOS := daemon.stores[container.OS].graphDriver
+		if (container.Driver == "" && currentDriverForContainerOS == "aufs") || container.Driver == currentDriverForContainerOS {
+			rwlayer, err := daemon.stores[container.OS].layerStore.GetRWLayer(container.ID)
 			if err != nil {
 				logrus.Errorf("Failed to load container mount %v: %v", id, err)
 				continue
 			}
 			container.RWLayer = rwlayer
-			logrus.Debugf("Loaded container %v", container.ID)
+			logrus.Debugf("Loaded container %v, isRunning: %v", container.ID, container.IsRunning())
 
 			containers[container.ID] = container
 		} else {
@@ -208,8 +208,10 @@
 		}
 	}
 
-	var wg sync.WaitGroup
-	var mapLock sync.Mutex
+	var (
+		wg      sync.WaitGroup
+		mapLock sync.Mutex
+	)
 	for _, c := range containers {
 		wg.Add(1)
 		go func(c *container.Container) {
@@ -220,11 +222,74 @@
 			}
 
 			daemon.setStateCounter(c)
+
+			logrus.WithFields(logrus.Fields{
+				"container": c.ID,
+				"running":   c.IsRunning(),
+				"paused":    c.IsPaused(),
+			}).Debug("restoring container")
+
+			var (
+				err      error
+				alive    bool
+				ec       uint32
+				exitedAt time.Time
+			)
+
+			alive, _, err = daemon.containerd.Restore(context.Background(), c.ID, c.InitializeStdio)
+			if err != nil && !errdefs.IsNotFound(err) {
+				logrus.Errorf("Failed to restore container %s with containerd: %s", c.ID, err)
+				return
+			}
+			if !alive {
+				ec, exitedAt, err = daemon.containerd.DeleteTask(context.Background(), c.ID)
+				if err != nil && !errdefs.IsNotFound(err) {
+					logrus.WithError(err).Errorf("Failed to delete container %s from containerd", c.ID)
+					return
+				}
+			}
+
 			if c.IsRunning() || c.IsPaused() {
 				c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
-				if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil {
-					logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
-					return
+
+				if c.IsPaused() && alive {
+					s, err := daemon.containerd.Status(context.Background(), c.ID)
+					if err != nil {
+						logrus.WithError(err).WithField("container", c.ID).
+							Errorf("Failed to get container status")
+					} else {
+						logrus.WithField("container", c.ID).WithField("state", s).
+							Info("restored container paused")
+						switch s {
+						case libcontainerd.StatusPaused, libcontainerd.StatusPausing:
+							// nothing to do
+						case libcontainerd.StatusStopped:
+							alive = false
+						case libcontainerd.StatusUnknown:
+							logrus.WithField("container", c.ID).
+								Error("Unknown status for container during restore")
+						default:
+							// running
+							c.Lock()
+							c.Paused = false
+							daemon.setStateCounter(c)
+							if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+								logrus.WithError(err).WithField("container", c.ID).
+									Error("Failed to update stopped container state")
+							}
+							c.Unlock()
+						}
+					}
+				}
+
+				if !alive {
+					c.Lock()
+					c.SetStopped(&container.ExitStatus{ExitCode: int(ec), ExitedAt: exitedAt})
+					daemon.Cleanup(c)
+					if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+						logrus.Errorf("Failed to update stopped container %s state: %v", c.ID, err)
+					}
+					c.Unlock()
 				}
 
 				// we call Mount and then Unmount to get BaseFs of the container
@@ -252,11 +317,9 @@
 					activeSandboxes[c.NetworkSettings.SandboxID] = options
 					mapLock.Unlock()
 				}
+			} else {
+				// get list of containers we need to restart
 
-			}
-			// fixme: only if not running
-			// get list of containers we need to restart
-			if !c.IsRunning() && !c.IsPaused() {
 				// Do not autostart containers which
 				// has endpoints in a swarm scope
 				// network yet since the cluster is
@@ -288,7 +351,7 @@
 				c.RemovalInProgress = false
 				c.Dead = true
 				if err := c.CheckpointTo(daemon.containersReplica); err != nil {
-					logrus.Errorf("Failed to update container %s state: %v", c.ID, err)
+					logrus.Errorf("Failed to update RemovalInProgress container %s state: %v", c.ID, err)
 				}
 			}
 			c.Unlock()
@@ -489,6 +552,8 @@
 	} else {
 		logrus.Warnf("failed to initiate ingress network removal: %v", err)
 	}
+
+	daemon.attachmentStore.ClearAttachments()
 }
 
 // setClusterProvider sets a component for querying the current cluster state.
@@ -552,10 +617,21 @@
 	if err != nil {
 		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
 	}
-	os.Setenv("TMPDIR", realTmp)
+	if runtime.GOOS == "windows" {
+		if _, err := os.Stat(realTmp); err != nil && os.IsNotExist(err) {
+			if err := system.MkdirAll(realTmp, 0700, ""); err != nil {
+				return nil, fmt.Errorf("Unable to create the TempDir (%s): %s", realTmp, err)
+			}
+		}
+		os.Setenv("TEMP", realTmp)
+		os.Setenv("TMP", realTmp)
+	} else {
+		os.Setenv("TMPDIR", realTmp)
+	}
 
 	d := &Daemon{
 		configStore: config,
+		PluginStore: pluginStore,
 		startupDone: make(chan struct{}),
 	}
 	// Ensure the daemon is properly shutdown if there is a failure during
@@ -603,6 +679,16 @@
 		return nil, err
 	}
 
+	// Create the directory where we'll store the runtime scripts (i.e. in
+	// order to support runtimeArgs)
+	daemonRuntimes := filepath.Join(config.Root, "runtimes")
+	if err := system.MkdirAll(daemonRuntimes, 0700, ""); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+	if err := d.loadRuntimes(); err != nil {
+		return nil, err
+	}
+
 	if runtime.GOOS == "windows" {
 		if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil && !os.IsExist(err) {
 			return nil, err
@@ -632,7 +718,6 @@
 	}
 
 	d.RegistryService = registryService
-	d.PluginStore = pluginStore
 	logger.RegisterPluginGetter(d.PluginStore)
 
 	metricsSockPath, err := d.listenMetricsSock()
@@ -641,12 +726,16 @@
 	}
 	registerMetricsPluginCallback(d.PluginStore, metricsSockPath)
 
+	createPluginExec := func(m *plugin.Manager) (plugin.Executor, error) {
+		return pluginexec.New(getPluginExecRoot(config.Root), containerdRemote, m)
+	}
+
 	// Plugin system initialization should happen before restore. Do not change order.
 	d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
 		Root:               filepath.Join(config.Root, "plugins"),
 		ExecRoot:           getPluginExecRoot(config.Root),
 		Store:              d.PluginStore,
-		Executor:           containerdRemote,
+		CreateExecutor:     createPluginExec,
 		RegistryService:    registryService,
 		LiveRestoreEnabled: config.LiveRestoreEnabled,
 		LogPluginEvent:     d.LogPluginEvent, // todo: make private
@@ -657,7 +746,7 @@
 	}
 
 	var graphDrivers []string
-	for platform, ds := range d.stores {
+	for operatingSystem, ds := range d.stores {
 		ls, err := layer.NewStoreFromOptions(layer.StoreOptions{
 			StorePath:                 config.Root,
 			MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
@@ -666,14 +755,14 @@
 			IDMappings:                idMappings,
 			PluginGetter:              d.PluginStore,
 			ExperimentalEnabled:       config.Experimental,
-			Platform:                  platform,
+			OS:                        operatingSystem,
 		})
 		if err != nil {
 			return nil, err
 		}
 		ds.graphDriver = ls.DriverName() // As layerstore may set the driver
 		ds.layerStore = ls
-		d.stores[platform] = ds
+		d.stores[operatingSystem] = ds
 		graphDrivers = append(graphDrivers, ls.DriverName())
 	}
 
@@ -684,13 +773,13 @@
 
 	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
 	lsMap := make(map[string]layer.Store)
-	for platform, ds := range d.stores {
-		lsMap[platform] = ds.layerStore
+	for operatingSystem, ds := range d.stores {
+		lsMap[operatingSystem] = ds.layerStore
 	}
 	d.downloadManager = xfer.NewLayerDownloadManager(lsMap, *config.MaxConcurrentDownloads)
 	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
 	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
-	for platform, ds := range d.stores {
+	for operatingSystem, ds := range d.stores {
 		imageRoot := filepath.Join(config.Root, "image", ds.graphDriver)
 		ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
 		if err != nil {
@@ -698,13 +787,13 @@
 		}
 
 		var is image.Store
-		is, err = image.NewImageStore(ifs, platform, ds.layerStore)
+		is, err = image.NewImageStore(ifs, operatingSystem, ds.layerStore)
 		if err != nil {
 			return nil, err
 		}
 		ds.imageRoot = imageRoot
 		ds.imageStore = is
-		d.stores[platform] = ds
+		d.stores[operatingSystem] = ds
 	}
 
 	// Configure the volumes driver
@@ -713,7 +802,7 @@
 		return nil, err
 	}
 
-	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
+	trustKey, err := loadOrCreateTrustKey(config.TrustKeyPath)
 	if err != nil {
 		return nil, err
 	}
@@ -795,13 +884,13 @@
 	d.idMappings = idMappings
 	d.seccompEnabled = sysInfo.Seccomp
 	d.apparmorEnabled = sysInfo.AppArmor
+	d.containerdRemote = containerdRemote
 
 	d.linkIndex = newLinkIndex()
-	d.containerdRemote = containerdRemote
 
 	go d.execCommandGC()
 
-	d.containerd, err = containerdRemote.Client(d)
+	d.containerd, err = containerdRemote.NewClient(MainNamespace, d)
 	if err != nil {
 		return nil, err
 	}
@@ -860,7 +949,7 @@
 
 	// Wait without timeout for the container to exit.
 	// Ignore the result.
-	_ = <-c.Wait(context.Background(), container.WaitConditionNotRunning)
+	<-c.Wait(context.Background(), container.WaitConditionNotRunning)
 	return nil
 }
 
@@ -904,7 +993,8 @@
 	}
 
 	if daemon.containers != nil {
-		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout)
+		logrus.Debugf("daemon configured with a %d seconds minimum shutdown timeout", daemon.configStore.ShutdownTimeout)
+		logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.ShutdownTimeout())
 		daemon.containers.ApplyAll(func(c *container.Container) {
 			if !c.IsRunning() {
 				return
@@ -914,7 +1004,7 @@
 				logrus.Errorf("Stop container error: %v", err)
 				return
 			}
-			if mountid, err := daemon.stores[c.Platform].layerStore.GetMountID(c.ID); err == nil {
+			if mountid, err := daemon.stores[c.OS].layerStore.GetMountID(c.ID); err == nil {
 				daemon.cleanupMountsByID(mountid)
 			}
 			logrus.Debugf("container stopped %s", c.ID)
@@ -967,14 +1057,14 @@
 	}
 	logrus.Debugf("container mounted via layerStore: %v", dir)
 
-	if container.BaseFS != dir {
+	if container.BaseFS != nil && container.BaseFS.Path() != dir.Path() {
 		// The mount path reported by the graph driver should always be trusted on Windows, since the
 		// volume path for a given mounted layer may change over time.  This should only be an error
 		// on non-Windows operating systems.
-		if container.BaseFS != "" && runtime.GOOS != "windows" {
+		if runtime.GOOS != "windows" {
 			daemon.Unmount(container)
 			return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
-				daemon.GraphDriverName(container.Platform), container.ID, container.BaseFS, dir)
+				daemon.GraphDriverName(container.OS), container.ID, container.BaseFS, dir)
 		}
 	}
 	container.BaseFS = dir // TODO: combine these fields
@@ -1034,7 +1124,7 @@
 					logrus.Warnf("failed to delete old tmp directory: %s", newName)
 				}
 			}()
-		} else {
+		} else if !os.IsNotExist(err) {
 			logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
 			if err := os.RemoveAll(tmpDir); err != nil {
 				logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
@@ -1046,7 +1136,7 @@
 	return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs)
 }
 
-func (daemon *Daemon) setupInitLayer(initPath string) error {
+func (daemon *Daemon) setupInitLayer(initPath containerfs.ContainerFS) error {
 	rootIDs := daemon.idMappings.RootPair()
 	return initlayer.Setup(initPath, rootIDs)
 }
@@ -1164,19 +1254,6 @@
 	return options, nil
 }
 
-func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
-	out := make([]types.BlkioStatEntry, len(entries))
-	for i, re := range entries {
-		out[i] = types.BlkioStatEntry{
-			Major: re.Major,
-			Minor: re.Minor,
-			Op:    re.Op,
-			Value: re.Value,
-		}
-	}
-	return out
-}
-
 // GetCluster returns the cluster
 func (daemon *Daemon) GetCluster() Cluster {
 	return daemon.cluster
@@ -1243,3 +1320,8 @@
 		resources.MemorySwappiness = nil
 	}
 }
+
+// GetAttachmentStore returns current attachment store associated with the daemon
+func (daemon *Daemon) GetAttachmentStore() *network.AttachmentStore {
+	return &daemon.attachmentStore
+}
diff --git a/daemon/daemon_solaris.go b/daemon/daemon_solaris.go
deleted file mode 100644
index 156d111..0000000
--- a/daemon/daemon_solaris.go
+++ /dev/null
@@ -1,529 +0,0 @@
-// +build solaris,cgo
-
-package daemon
-
-import (
-	"fmt"
-	"net"
-	"strconv"
-
-	"github.com/docker/docker/api/types"
-	containertypes "github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/container"
-	"github.com/docker/docker/daemon/config"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/pkg/fileutils"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/parsers/kernel"
-	"github.com/docker/docker/pkg/sysinfo"
-	"github.com/docker/libnetwork"
-	nwconfig "github.com/docker/libnetwork/config"
-	"github.com/docker/libnetwork/drivers/solaris/bridge"
-	"github.com/docker/libnetwork/netlabel"
-	"github.com/docker/libnetwork/netutils"
-	lntypes "github.com/docker/libnetwork/types"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/opencontainers/selinux/go-selinux/label"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-)
-
-//#include <zone.h>
-import "C"
-
-const (
-	platformSupported   = true
-	solarisMinCPUShares = 1
-	solarisMaxCPUShares = 65535
-)
-
-func getMemoryResources(config containertypes.Resources) specs.CappedMemory {
-	memory := specs.CappedMemory{
-		DisableOOMKiller: config.OomKillDisable,
-	}
-
-	if config.Memory > 0 {
-		memory.Physical = strconv.FormatInt(config.Memory, 10)
-	}
-
-	if config.MemorySwap != 0 {
-		memory.Swap = strconv.FormatInt(config.MemorySwap, 10)
-	}
-
-	return memory
-}
-
-func getCPUResources(config containertypes.Resources) specs.CappedCPU {
-	cpu := specs.CappedCPU{}
-
-	if config.CpusetCpus != "" {
-		cpu.Ncpus = config.CpusetCpus
-	}
-
-	return cpu
-}
-
-func (daemon *Daemon) cleanupMountsByID(id string) error {
-	return nil
-}
-
-func (daemon *Daemon) parseSecurityOpt(container *container.Container, hostConfig *containertypes.HostConfig) error {
-	return parseSecurityOpt(container, hostConfig)
-}
-
-func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
-	//Since hostConfig.SecurityOpt is specifically defined as a "List of string values to
-	//customize labels for MLs systems, such as SELinux"
-	//until we figure out how to map to Trusted Extensions
-	//this is being disabled for now on Solaris
-	var (
-		labelOpts []string
-		err       error
-	)
-
-	if len(config.SecurityOpt) > 0 {
-		return errors.New("Security options are not supported on Solaris")
-	}
-
-	container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts)
-	return err
-}
-
-func setupRemappedRoot(config *config.Config) (*idtools.IDMappings, error) {
-	return nil, nil
-}
-
-func setupDaemonRoot(config *config.Config, rootDir string, rootIDs idtools.IDPair) error {
-	return nil
-}
-
-func (daemon *Daemon) getLayerInit() func(string) error {
-	return nil
-}
-
-func checkKernel() error {
-	// solaris can rely upon checkSystem() below, we don't skew kernel versions
-	return nil
-}
-
-func (daemon *Daemon) getCgroupDriver() string {
-	return ""
-}
-
-func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error {
-	if hostConfig.CPUShares < 0 {
-		logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, solarisMinCPUShares)
-		hostConfig.CPUShares = solarisMinCPUShares
-	} else if hostConfig.CPUShares > solarisMaxCPUShares {
-		logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, solarisMaxCPUShares)
-		hostConfig.CPUShares = solarisMaxCPUShares
-	}
-
-	if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 {
-		// By default, MemorySwap is set to twice the size of Memory.
-		hostConfig.MemorySwap = hostConfig.Memory * 2
-	}
-
-	if hostConfig.ShmSize != 0 {
-		hostConfig.ShmSize = container.DefaultSHMSize
-	}
-	if hostConfig.OomKillDisable == nil {
-		defaultOomKillDisable := false
-		hostConfig.OomKillDisable = &defaultOomKillDisable
-	}
-
-	return nil
-}
-
-// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd
-func UsingSystemd(config *config.Config) bool {
-	return false
-}
-
-// verifyPlatformContainerSettings performs platform-specific validation of the
-// hostconfig and config structures.
-func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) {
-	fixMemorySwappiness(resources)
-	warnings := []string{}
-	sysInfo := sysinfo.New(true)
-	// NOTE: We do not enforce a minimum value for swap limits for zones on Solaris and
-	// therefore we will not do that for Docker container either.
-	if hostConfig.Memory > 0 && !sysInfo.MemoryLimit {
-		warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.")
-		logrus.Warnf("Your kernel does not support memory limit capabilities. Limitation discarded.")
-		hostConfig.Memory = 0
-		hostConfig.MemorySwap = -1
-	}
-	if hostConfig.Memory > 0 && hostConfig.MemorySwap != -1 && !sysInfo.SwapLimit {
-		warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.")
-		logrus.Warnf("Your kernel does not support swap limit capabilities, memory limited without swap.")
-		hostConfig.MemorySwap = -1
-	}
-	if hostConfig.Memory > 0 && hostConfig.MemorySwap > 0 && hostConfig.MemorySwap < hostConfig.Memory {
-		return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage.")
-	}
-	// Solaris NOTE: We allow and encourage setting the swap without setting the memory limit.
-
-	if hostConfig.MemorySwappiness != nil && !sysInfo.MemorySwappiness {
-		warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
-		logrus.Warnf("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.")
-		hostConfig.MemorySwappiness = nil
-	}
-	if hostConfig.MemoryReservation > 0 && !sysInfo.MemoryReservation {
-		warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.")
-		logrus.Warnf("Your kernel does not support memory soft limit capabilities. Limitation discarded.")
-		hostConfig.MemoryReservation = 0
-	}
-	if hostConfig.Memory > 0 && hostConfig.MemoryReservation > 0 && hostConfig.Memory < hostConfig.MemoryReservation {
-		return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage.")
-	}
-	if hostConfig.KernelMemory > 0 && !sysInfo.KernelMemory {
-		warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
-		logrus.Warnf("Your kernel does not support kernel memory limit capabilities. Limitation discarded.")
-		hostConfig.KernelMemory = 0
-	}
-	if hostConfig.CPUShares != 0 && !sysInfo.CPUShares {
-		warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.")
-		logrus.Warnf("Your kernel does not support CPU shares. Shares discarded.")
-		hostConfig.CPUShares = 0
-	}
-	if hostConfig.CPUShares < 0 {
-		warnings = append(warnings, "Invalid CPUShares value. Must be positive. Discarding.")
-		logrus.Warnf("Invalid CPUShares value. Must be positive. Discarding.")
-		hostConfig.CPUQuota = 0
-	}
-	if hostConfig.CPUShares > 0 && !sysinfo.IsCPUSharesAvailable() {
-		warnings = append(warnings, "Global zone default scheduling class not FSS. Discarding shares.")
-		logrus.Warnf("Global zone default scheduling class not FSS. Discarding shares.")
-		hostConfig.CPUShares = 0
-	}
-
-	// Solaris NOTE: Linux does not do negative checking for CPUShares and Quota here. But it makes sense to.
-	if hostConfig.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod {
-		warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.")
-		logrus.Warnf("Your kernel does not support CPU cfs period. Period discarded.")
-		if hostConfig.CPUQuota > 0 {
-			warnings = append(warnings, "Quota will be applied on default period, not period specified.")
-			logrus.Warnf("Quota will be applied on default period, not period specified.")
-		}
-		hostConfig.CPUPeriod = 0
-	}
-	if hostConfig.CPUQuota != 0 && !sysInfo.CPUCfsQuota {
-		warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.")
-		logrus.Warnf("Your kernel does not support CPU cfs quota. Quota discarded.")
-		hostConfig.CPUQuota = 0
-	}
-	if hostConfig.CPUQuota < 0 {
-		warnings = append(warnings, "Invalid CPUQuota value. Must be positive. Discarding.")
-		logrus.Warnf("Invalid CPUQuota value. Must be positive. Discarding.")
-		hostConfig.CPUQuota = 0
-	}
-	if (hostConfig.CpusetCpus != "" || hostConfig.CpusetMems != "") && !sysInfo.Cpuset {
-		warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.")
-		logrus.Warnf("Your kernel does not support cpuset. Cpuset discarded.")
-		hostConfig.CpusetCpus = ""
-		hostConfig.CpusetMems = ""
-	}
-	cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(hostConfig.CpusetCpus)
-	if err != nil {
-		return warnings, fmt.Errorf("Invalid value %s for cpuset cpus.", hostConfig.CpusetCpus)
-	}
-	if !cpusAvailable {
-		return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s.", hostConfig.CpusetCpus, sysInfo.Cpus)
-	}
-	memsAvailable, err := sysInfo.IsCpusetMemsAvailable(hostConfig.CpusetMems)
-	if err != nil {
-		return warnings, fmt.Errorf("Invalid value %s for cpuset mems.", hostConfig.CpusetMems)
-	}
-	if !memsAvailable {
-		return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s.", hostConfig.CpusetMems, sysInfo.Mems)
-	}
-	if hostConfig.BlkioWeight > 0 && !sysInfo.BlkioWeight {
-		warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.")
-		logrus.Warnf("Your kernel does not support Block I/O weight. Weight discarded.")
-		hostConfig.BlkioWeight = 0
-	}
-	if hostConfig.OomKillDisable != nil && !sysInfo.OomKillDisable {
-		*hostConfig.OomKillDisable = false
-		// Don't warn; this is the default setting but only applicable to Linux
-	}
-
-	if sysInfo.IPv4ForwardingDisabled {
-		warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.")
-		logrus.Warnf("IPv4 forwarding is disabled. Networking will not work")
-	}
-
-	// Solaris NOTE: We do not allow setting Linux specific options, so check and warn for all of them.
-
-	if hostConfig.CapAdd != nil || hostConfig.CapDrop != nil {
-		warnings = append(warnings, "Adding or dropping kernel capabilities unsupported on Solaris.Discarding capabilities lists.")
-		logrus.Warnf("Adding or dropping kernel capabilities unsupported on Solaris.Discarding capabilities lists.")
-		hostConfig.CapAdd = nil
-		hostConfig.CapDrop = nil
-	}
-
-	if hostConfig.GroupAdd != nil {
-		warnings = append(warnings, "Additional groups unsupported on Solaris.Discarding groups lists.")
-		logrus.Warnf("Additional groups unsupported on Solaris.Discarding groups lists.")
-		hostConfig.GroupAdd = nil
-	}
-
-	if hostConfig.IpcMode != "" {
-		warnings = append(warnings, "IPC namespace assignment unsupported on Solaris.Discarding IPC setting.")
-		logrus.Warnf("IPC namespace assignment unsupported on Solaris.Discarding IPC setting.")
-		hostConfig.IpcMode = ""
-	}
-
-	if hostConfig.PidMode != "" {
-		warnings = append(warnings, "PID namespace setting  unsupported on Solaris. Running container in host PID namespace.")
-		logrus.Warnf("PID namespace setting  unsupported on Solaris. Running container in host PID namespace.")
-		hostConfig.PidMode = ""
-	}
-
-	if hostConfig.Privileged {
-		warnings = append(warnings, "Privileged mode unsupported on Solaris. Discarding privileged mode setting.")
-		logrus.Warnf("Privileged mode unsupported on Solaris. Discarding privileged mode setting.")
-		hostConfig.Privileged = false
-	}
-
-	if hostConfig.UTSMode != "" {
-		warnings = append(warnings, "UTS namespace assignment unsupported on Solaris.Discarding UTS setting.")
-		logrus.Warnf("UTS namespace assignment unsupported on Solaris.Discarding UTS setting.")
-		hostConfig.UTSMode = ""
-	}
-
-	if hostConfig.CgroupParent != "" {
-		warnings = append(warnings, "Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.")
-		logrus.Warnf("Specifying Cgroup parent unsupported on Solaris. Discarding cgroup parent setting.")
-		hostConfig.CgroupParent = ""
-	}
-
-	if hostConfig.Ulimits != nil {
-		warnings = append(warnings, "Specifying ulimits unsupported on Solaris. Discarding ulimits setting.")
-		logrus.Warnf("Specifying ulimits unsupported on Solaris. Discarding ulimits setting.")
-		hostConfig.Ulimits = nil
-	}
-
-	return warnings, nil
-}
-
-// reloadPlatform updates configuration with platform specific options
-// and updates the passed attributes
-func (daemon *Daemon) reloadPlatform(conf *config.Config, attributes map[string]string) error {
-	return nil
-}
-
-// verifyDaemonSettings performs validation of daemon config struct
-func verifyDaemonSettings(conf *config.Config) error {
-
-	if conf.DefaultRuntime == "" {
-		conf.DefaultRuntime = stockRuntimeName
-	}
-	if conf.Runtimes == nil {
-		conf.Runtimes = make(map[string]types.Runtime)
-	}
-	stockRuntimeOpts := []string{}
-	conf.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary, Args: stockRuntimeOpts}
-
-	return nil
-}
-
-// checkSystem validates platform-specific requirements
-func checkSystem() error {
-	// check OS version for compatibility, ensure running in global zone
-	var err error
-	var id C.zoneid_t
-
-	if id, err = C.getzoneid(); err != nil {
-		return fmt.Errorf("Exiting. Error getting zone id: %+v", err)
-	}
-	if int(id) != 0 {
-		return fmt.Errorf("Exiting because the Docker daemon is not running in the global zone")
-	}
-
-	v, err := kernel.GetKernelVersion()
-	if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}) < 0 {
-		return fmt.Errorf("Your Solaris kernel version: %s doesn't support Docker. Please upgrade to 5.12.0", v.String())
-	}
-	return err
-}
-
-// configureMaxThreads sets the Go runtime max threads threshold
-// which is 90% of the kernel setting from /proc/sys/kernel/threads-max
-func configureMaxThreads(config *config.Config) error {
-	return nil
-}
-
-// configureKernelSecuritySupport configures and validates security support for the kernel
-func configureKernelSecuritySupport(config *config.Config, driverNames []string) error {
-	return nil
-}
-
-func (daemon *Daemon) initNetworkController(config *config.Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) {
-	netOptions, err := daemon.networkOptions(config, daemon.PluginStore, activeSandboxes)
-	if err != nil {
-		return nil, err
-	}
-
-	controller, err := libnetwork.New(netOptions...)
-	if err != nil {
-		return nil, fmt.Errorf("error obtaining controller instance: %v", err)
-	}
-
-	// Initialize default network on "null"
-	if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)); err != nil {
-		return nil, fmt.Errorf("Error creating default 'null' network: %v", err)
-	}
-
-	if !config.DisableBridge {
-		// Initialize default driver "bridge"
-		if err := initBridgeDriver(controller, config); err != nil {
-			return nil, err
-		}
-	}
-
-	return controller, nil
-}
-
-func initBridgeDriver(controller libnetwork.NetworkController, config *config.Config) error {
-	if n, err := controller.NetworkByName("bridge"); err == nil {
-		if err = n.Delete(); err != nil {
-			return fmt.Errorf("could not delete the default bridge network: %v", err)
-		}
-	}
-
-	bridgeName := bridge.DefaultBridgeName
-	if config.bridgeConfig.Iface != "" {
-		bridgeName = config.bridgeConfig.Iface
-	}
-	netOption := map[string]string{
-		bridge.BridgeName:    bridgeName,
-		bridge.DefaultBridge: strconv.FormatBool(true),
-		netlabel.DriverMTU:   strconv.Itoa(config.Mtu),
-		bridge.EnableICC:     strconv.FormatBool(config.bridgeConfig.InterContainerCommunication),
-	}
-
-	// --ip processing
-	if config.bridgeConfig.DefaultIP != nil {
-		netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String()
-	}
-
-	var ipamV4Conf *libnetwork.IpamConf
-
-	ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)}
-
-	nwList, _, err := netutils.ElectInterfaceAddresses(bridgeName)
-	if err != nil {
-		return errors.Wrap(err, "list bridge addresses failed")
-	}
-
-	nw := nwList[0]
-	if len(nwList) > 1 && config.bridgeConfig.FixedCIDR != "" {
-		_, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR)
-		if err != nil {
-			return errors.Wrap(err, "parse CIDR failed")
-		}
-		// Iterate through in case there are multiple addresses for the bridge
-		for _, entry := range nwList {
-			if fCIDR.Contains(entry.IP) {
-				nw = entry
-				break
-			}
-		}
-	}
-
-	ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String()
-	hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask)
-	if hip.IsGlobalUnicast() {
-		ipamV4Conf.Gateway = nw.IP.String()
-	}
-
-	if config.bridgeConfig.IP != "" {
-		ipamV4Conf.PreferredPool = config.bridgeConfig.IP
-		ip, _, err := net.ParseCIDR(config.bridgeConfig.IP)
-		if err != nil {
-			return err
-		}
-		ipamV4Conf.Gateway = ip.String()
-	} else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" {
-		logrus.Infof("Default bridge (%s) is assigned with an IP address %s. Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool)
-	}
-
-	if config.bridgeConfig.FixedCIDR != "" {
-		_, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR)
-		if err != nil {
-			return err
-		}
-
-		ipamV4Conf.SubPool = fCIDR.String()
-	}
-
-	if config.bridgeConfig.DefaultGatewayIPv4 != nil {
-		ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String()
-	}
-
-	v4Conf := []*libnetwork.IpamConf{ipamV4Conf}
-	v6Conf := []*libnetwork.IpamConf{}
-
-	// Initialize default network on "bridge" with the same name
-	_, err = controller.NewNetwork("bridge", "bridge", "",
-		libnetwork.NetworkOptionDriverOpts(netOption),
-		libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil),
-		libnetwork.NetworkOptionDeferIPv6Alloc(false))
-	if err != nil {
-		return fmt.Errorf("Error creating default 'bridge' network: %v", err)
-	}
-	return nil
-}
-
-// registerLinks sets up links between containers and writes the
-// configuration out for persistence.
-func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error {
-	return nil
-}
-
-func (daemon *Daemon) cleanupMounts() error {
-	return nil
-}
-
-// conditionalMountOnStart is a platform specific helper function during the
-// container start to call mount.
-func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
-	return daemon.Mount(container)
-}
-
-// conditionalUnmountOnCleanup is a platform specific helper function called
-// during the cleanup of a container to unmount.
-func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
-	return daemon.Unmount(container)
-}
-
-func driverOptions(config *config.Config) []nwconfig.Option {
-	return []nwconfig.Option{}
-}
-
-func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
-	return nil, nil
-}
-
-// setDefaultIsolation determine the default isolation mode for the
-// daemon to run in. This is only applicable on Windows
-func (daemon *Daemon) setDefaultIsolation() error {
-	return nil
-}
-
-func rootFSToAPIType(rootfs *image.RootFS) types.RootFS {
-	return types.RootFS{}
-}
-
-func setupDaemonProcess(config *config.Config) error {
-	return nil
-}
-
-func (daemon *Daemon) setupSeccompProfile() error {
-	return nil
-}
-
-func getRealPath(path string) (string, error) {
-	return fileutils.ReadSymlinkedDirectory(path)
-}
diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go
index 13d1059..4044fad 100644
--- a/daemon/daemon_test.go
+++ b/daemon/daemon_test.go
@@ -1,5 +1,3 @@
-// +build !solaris
-
 package daemon
 
 import (
diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go
index 0082706..b0b6247 100644
--- a/daemon/daemon_unix.go
+++ b/daemon/daemon_unix.go
@@ -5,6 +5,7 @@
 import (
 	"bufio"
 	"bytes"
+	"context"
 	"fmt"
 	"io/ioutil"
 	"net"
@@ -16,6 +17,7 @@
 	"strings"
 	"time"
 
+	containerd_cgroups "github.com/containerd/cgroups"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/blkiodev"
 	pblkiodev "github.com/docker/docker/api/types/blkiodev"
@@ -24,7 +26,9 @@
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/opts"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/parsers"
 	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/docker/pkg/sysinfo"
@@ -37,7 +41,6 @@
 	"github.com/docker/libnetwork/netutils"
 	"github.com/docker/libnetwork/options"
 	lntypes "github.com/docker/libnetwork/types"
-	"github.com/golang/protobuf/ptypes"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
 	rsystem "github.com/opencontainers/runc/libcontainer/system"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -49,6 +52,14 @@
 )
 
 const (
+	// DefaultShimBinary is the default shim to be used by containerd if none
+	// is specified
+	DefaultShimBinary = "docker-containerd-shim"
+
+	// DefaultRuntimeBinary is the default runtime to be used by
+	// containerd if none is specified
+	DefaultRuntimeBinary = "docker-runc"
+
 	// See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269
 	linuxMinCPUShares = 2
 	linuxMaxCPUShares = 262144
@@ -62,6 +73,10 @@
 	// constant for cgroup drivers
 	cgroupFsDriver      = "cgroupfs"
 	cgroupSystemdDriver = "systemd"
+
+	// DefaultRuntimeName is the default runtime to be used by
+	// containerd if none is specified
+	DefaultRuntimeName = "docker-runc"
 )
 
 type containerGetter interface {
@@ -612,8 +627,9 @@
 		return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime)
 	}
 
+	parser := volume.NewParser(runtime.GOOS)
 	for dest := range hostConfig.Tmpfs {
-		if err := volume.ValidateTmpfsMountDestination(dest); err != nil {
+		if err := parser.ValidateTmpfsMountDestination(dest); err != nil {
 			return warnings, err
 		}
 	}
@@ -621,6 +637,54 @@
 	return warnings, nil
 }
 
+func (daemon *Daemon) loadRuntimes() error {
+	return daemon.initRuntimes(daemon.configStore.Runtimes)
+}
+
+func (daemon *Daemon) initRuntimes(runtimes map[string]types.Runtime) (err error) {
+	runtimeDir := filepath.Join(daemon.configStore.Root, "runtimes")
+	// Remove old temp directory if any
+	os.RemoveAll(runtimeDir + "-old")
+	tmpDir, err := ioutils.TempDir(daemon.configStore.Root, "gen-runtimes")
+	if err != nil {
+		return errors.Wrapf(err, "failed to get temp dir to generate runtime scripts")
+	}
+	defer func() {
+		if err != nil {
+			if err1 := os.RemoveAll(tmpDir); err1 != nil {
+				logrus.WithError(err1).WithField("dir", tmpDir).
+					Warnf("failed to remove tmp dir")
+			}
+			return
+		}
+
+		if err = os.Rename(runtimeDir, runtimeDir+"-old"); err != nil {
+			return
+		}
+		if err = os.Rename(tmpDir, runtimeDir); err != nil {
+			err = errors.Wrapf(err, "failed to setup runtimes dir, new containers may not start")
+			return
+		}
+		if err = os.RemoveAll(runtimeDir + "-old"); err != nil {
+			logrus.WithError(err).WithField("dir", tmpDir).
+				Warnf("failed to remove old runtimes dir")
+		}
+	}()
+
+	for name, rt := range runtimes {
+		if len(rt.Args) == 0 {
+			continue
+		}
+
+		script := filepath.Join(tmpDir, name)
+		content := fmt.Sprintf("#!/bin/sh\n%s %s $@\n", rt.Path, strings.Join(rt.Args, " "))
+		if err := ioutil.WriteFile(script, []byte(content), 0700); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
 // reloadPlatform updates configuration with platform specific options
 // and updates the passed attributes
 func (daemon *Daemon) reloadPlatform(conf *config.Config, attributes map[string]string) error {
@@ -629,9 +693,12 @@
 	}
 
 	if conf.IsValueSet("runtimes") {
-		daemon.configStore.Runtimes = conf.Runtimes
 		// Always set the default one
-		daemon.configStore.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary}
+		conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary}
+		if err := daemon.initRuntimes(conf.Runtimes); err != nil {
+			return err
+		}
+		daemon.configStore.Runtimes = conf.Runtimes
 	}
 
 	if conf.DefaultRuntime != "" {
@@ -690,7 +757,7 @@
 	if conf.Runtimes == nil {
 		conf.Runtimes = make(map[string]types.Runtime)
 	}
-	conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary}
+	conf.Runtimes[config.StockRuntimeName] = types.Runtime{Path: DefaultRuntimeName}
 
 	return nil
 }
@@ -987,7 +1054,7 @@
 	}
 }
 
-func (daemon *Daemon) getLayerInit() func(string) error {
+func (daemon *Daemon) getLayerInit() func(containerfs.ContainerFS) error {
 	return daemon.setupInitLayer
 }
 
@@ -1212,11 +1279,24 @@
 	return daemon.Unmount(container)
 }
 
+func copyBlkioEntry(entries []*containerd_cgroups.BlkIOEntry) []types.BlkioStatEntry {
+	out := make([]types.BlkioStatEntry, len(entries))
+	for i, re := range entries {
+		out[i] = types.BlkioStatEntry{
+			Major: re.Major,
+			Minor: re.Minor,
+			Op:    re.Op,
+			Value: re.Value,
+		}
+	}
+	return out
+}
+
 func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) {
 	if !c.IsRunning() {
 		return nil, errNotRunning(c.ID)
 	}
-	stats, err := daemon.containerd.Stats(c.ID)
+	cs, err := daemon.containerd.Stats(context.Background(), c.ID)
 	if err != nil {
 		if strings.Contains(err.Error(), "container not found") {
 			return nil, containerNotFound(c.ID)
@@ -1224,54 +1304,98 @@
 		return nil, err
 	}
 	s := &types.StatsJSON{}
-	cgs := stats.CgroupStats
-	if cgs != nil {
+	s.Read = cs.Read
+	stats := cs.Metrics
+	if stats.Blkio != nil {
 		s.BlkioStats = types.BlkioStats{
-			IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive),
-			IoServicedRecursive:     copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive),
-			IoQueuedRecursive:       copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive),
-			IoServiceTimeRecursive:  copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive),
-			IoWaitTimeRecursive:     copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive),
-			IoMergedRecursive:       copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive),
-			IoTimeRecursive:         copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive),
-			SectorsRecursive:        copyBlkioEntry(cgs.BlkioStats.SectorsRecursive),
+			IoServiceBytesRecursive: copyBlkioEntry(stats.Blkio.IoServiceBytesRecursive),
+			IoServicedRecursive:     copyBlkioEntry(stats.Blkio.IoServicedRecursive),
+			IoQueuedRecursive:       copyBlkioEntry(stats.Blkio.IoQueuedRecursive),
+			IoServiceTimeRecursive:  copyBlkioEntry(stats.Blkio.IoServiceTimeRecursive),
+			IoWaitTimeRecursive:     copyBlkioEntry(stats.Blkio.IoWaitTimeRecursive),
+			IoMergedRecursive:       copyBlkioEntry(stats.Blkio.IoMergedRecursive),
+			IoTimeRecursive:         copyBlkioEntry(stats.Blkio.IoTimeRecursive),
+			SectorsRecursive:        copyBlkioEntry(stats.Blkio.SectorsRecursive),
 		}
-		cpu := cgs.CpuStats
+	}
+	if stats.CPU != nil {
 		s.CPUStats = types.CPUStats{
 			CPUUsage: types.CPUUsage{
-				TotalUsage:        cpu.CpuUsage.TotalUsage,
-				PercpuUsage:       cpu.CpuUsage.PercpuUsage,
-				UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode,
-				UsageInUsermode:   cpu.CpuUsage.UsageInUsermode,
+				TotalUsage:        stats.CPU.Usage.Total,
+				PercpuUsage:       stats.CPU.Usage.PerCPU,
+				UsageInKernelmode: stats.CPU.Usage.Kernel,
+				UsageInUsermode:   stats.CPU.Usage.User,
 			},
 			ThrottlingData: types.ThrottlingData{
-				Periods:          cpu.ThrottlingData.Periods,
-				ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods,
-				ThrottledTime:    cpu.ThrottlingData.ThrottledTime,
+				Periods:          stats.CPU.Throttling.Periods,
+				ThrottledPeriods: stats.CPU.Throttling.ThrottledPeriods,
+				ThrottledTime:    stats.CPU.Throttling.ThrottledTime,
 			},
 		}
-		mem := cgs.MemoryStats.Usage
-		s.MemoryStats = types.MemoryStats{
-			Usage:    mem.Usage,
-			MaxUsage: mem.MaxUsage,
-			Stats:    cgs.MemoryStats.Stats,
-			Failcnt:  mem.Failcnt,
-			Limit:    mem.Limit,
-		}
-		// if the container does not set memory limit, use the machineMemory
-		if mem.Limit > daemon.machineMemory && daemon.machineMemory > 0 {
-			s.MemoryStats.Limit = daemon.machineMemory
-		}
-		if cgs.PidsStats != nil {
-			s.PidsStats = types.PidsStats{
-				Current: cgs.PidsStats.Current,
+	}
+
+	if stats.Memory != nil {
+		raw := make(map[string]uint64)
+		raw["cache"] = stats.Memory.Cache
+		raw["rss"] = stats.Memory.RSS
+		raw["rss_huge"] = stats.Memory.RSSHuge
+		raw["mapped_file"] = stats.Memory.MappedFile
+		raw["dirty"] = stats.Memory.Dirty
+		raw["writeback"] = stats.Memory.Writeback
+		raw["pgpgin"] = stats.Memory.PgPgIn
+		raw["pgpgout"] = stats.Memory.PgPgOut
+		raw["pgfault"] = stats.Memory.PgFault
+		raw["pgmajfault"] = stats.Memory.PgMajFault
+		raw["inactive_anon"] = stats.Memory.InactiveAnon
+		raw["active_anon"] = stats.Memory.ActiveAnon
+		raw["inactive_file"] = stats.Memory.InactiveFile
+		raw["active_file"] = stats.Memory.ActiveFile
+		raw["unevictable"] = stats.Memory.Unevictable
+		raw["hierarchical_memory_limit"] = stats.Memory.HierarchicalMemoryLimit
+		raw["hierarchical_memsw_limit"] = stats.Memory.HierarchicalSwapLimit
+		raw["total_cache"] = stats.Memory.TotalCache
+		raw["total_rss"] = stats.Memory.TotalRSS
+		raw["total_rss_huge"] = stats.Memory.TotalRSSHuge
+		raw["total_mapped_file"] = stats.Memory.TotalMappedFile
+		raw["total_dirty"] = stats.Memory.TotalDirty
+		raw["total_writeback"] = stats.Memory.TotalWriteback
+		raw["total_pgpgin"] = stats.Memory.TotalPgPgIn
+		raw["total_pgpgout"] = stats.Memory.TotalPgPgOut
+		raw["total_pgfault"] = stats.Memory.TotalPgFault
+		raw["total_pgmajfault"] = stats.Memory.TotalPgMajFault
+		raw["total_inactive_anon"] = stats.Memory.TotalInactiveAnon
+		raw["total_active_anon"] = stats.Memory.TotalActiveAnon
+		raw["total_inactive_file"] = stats.Memory.TotalInactiveFile
+		raw["total_active_file"] = stats.Memory.TotalActiveFile
+		raw["total_unevictable"] = stats.Memory.TotalUnevictable
+
+		if stats.Memory.Usage != nil {
+			s.MemoryStats = types.MemoryStats{
+				Stats:    raw,
+				Usage:    stats.Memory.Usage.Usage,
+				MaxUsage: stats.Memory.Usage.Max,
+				Limit:    stats.Memory.Usage.Limit,
+				Failcnt:  stats.Memory.Usage.Failcnt,
+			}
+		} else {
+			s.MemoryStats = types.MemoryStats{
+				Stats: raw,
 			}
 		}
+
+		// if the container does not set memory limit, use the machineMemory
+		if s.MemoryStats.Limit > daemon.machineMemory && daemon.machineMemory > 0 {
+			s.MemoryStats.Limit = daemon.machineMemory
+		}
 	}
-	s.Read, err = ptypes.Timestamp(stats.Timestamp)
-	if err != nil {
-		return nil, err
+
+	if stats.Pids != nil {
+		s.PidsStats = types.PidsStats{
+			Current: stats.Pids.Current,
+			Limit:   stats.Pids.Limit,
+		}
 	}
+
 	return s, nil
 }
 
@@ -1295,7 +1419,42 @@
 // setupDaemonProcess sets various settings for the daemon's process
 func setupDaemonProcess(config *config.Config) error {
 	// setup the daemons oom_score_adj
-	return setupOOMScoreAdj(config.OOMScoreAdjust)
+	if err := setupOOMScoreAdj(config.OOMScoreAdjust); err != nil {
+		return err
+	}
+	if err := setMayDetachMounts(); err != nil {
+		logrus.WithError(err).Warn("Could not set may_detach_mounts kernel parameter")
+	}
+	return nil
+}
+
+// This is used to allow removal of mountpoints that may be mounted in other
+// namespaces on RHEL based kernels starting from RHEL 7.4.
+// Without this setting, removals on these RHEL based kernels may fail with
+// "device or resource busy".
+// This setting is not available in upstream kernels as it is not configurable;
+// the equivalent behavior has been unconditionally enabled there since 3.15.
+func setMayDetachMounts() error {
+	f, err := os.OpenFile("/proc/sys/fs/may_detach_mounts", os.O_WRONLY, 0)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return errors.Wrap(err, "error opening may_detach_mounts kernel config file")
+	}
+	defer f.Close()
+
+	_, err = f.WriteString("1")
+	if os.IsPermission(err) {
+		// Setting may_detach_mounts does not work in an
+		// unprivileged container. Ignore the error, but log
+		// it if we appear not to be in that situation.
+		if !rsystem.RunningInUserNS() {
+			logrus.Debugf("Permission denied writing %q to /proc/sys/fs/may_detach_mounts", "1")
+		}
+		return nil
+	}
+	return err
 }
 
 func setupOOMScoreAdj(score int) error {
diff --git a/daemon/daemon_unix_test.go b/daemon/daemon_unix_test.go
index f3a7ce4..a4db473 100644
--- a/daemon/daemon_unix_test.go
+++ b/daemon/daemon_unix_test.go
@@ -1,4 +1,4 @@
-// +build !windows,!solaris
+// +build !windows
 
 package daemon
 
@@ -17,6 +17,7 @@
 	"github.com/docker/docker/volume/drivers"
 	"github.com/docker/docker/volume/local"
 	"github.com/docker/docker/volume/store"
+	"github.com/stretchr/testify/require"
 )
 
 type fakeContainerGetter struct {
@@ -289,13 +290,12 @@
 	containerRoot := filepath.Join(rootDir, "containers")
 	cid := "1234"
 	err = os.MkdirAll(filepath.Join(containerRoot, cid), 0755)
+	require.NoError(t, err)
 
 	vid := "5678"
 	vfsPath := filepath.Join(rootDir, "vfs", "dir", vid)
 	err = os.MkdirAll(vfsPath, 0755)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	config := []byte(`
 		{
diff --git a/daemon/daemon_unsupported.go b/daemon/daemon_unsupported.go
index cb1acf6..987528f 100644
--- a/daemon/daemon_unsupported.go
+++ b/daemon/daemon_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!freebsd,!windows,!solaris
+// +build !linux,!freebsd,!windows
 
 package daemon
 
diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go
index f78f60a..a79ed4f 100644
--- a/daemon/daemon_windows.go
+++ b/daemon/daemon_windows.go
@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -12,6 +13,7 @@
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/image"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/fileutils"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/parsers"
@@ -26,8 +28,10 @@
 	"github.com/docker/libnetwork/netlabel"
 	"github.com/docker/libnetwork/options"
 	blkiodev "github.com/opencontainers/runc/libcontainer/configs"
+	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/windows"
+	"golang.org/x/sys/windows/svc/mgr"
 )
 
 const (
@@ -56,7 +60,7 @@
 	return nil
 }
 
-func (daemon *Daemon) getLayerInit() func(string) error {
+func (daemon *Daemon) getLayerInit() func(containerfs.ContainerFS) error {
 	return nil
 }
 
@@ -234,9 +238,32 @@
 
 	vmcompute := windows.NewLazySystemDLL("vmcompute.dll")
 	if vmcompute.Load() != nil {
-		return fmt.Errorf("Failed to load vmcompute.dll. Ensure that the Containers role is installed.")
+		return fmt.Errorf("failed to load vmcompute.dll, ensure that the Containers feature is installed")
 	}
 
+	// Ensure that the required Host Network Service and vmcompute services
+	// are installed. Docker will fail in unexpected ways if they are not present.
+	var requiredServices = []string{"hns", "vmcompute"}
+	if err := ensureServicesInstalled(requiredServices); err != nil {
+		return errors.Wrap(err, "a required service is not installed, ensure the Containers feature is installed")
+	}
+
+	return nil
+}
+
+func ensureServicesInstalled(services []string) error {
+	m, err := mgr.Connect()
+	if err != nil {
+		return err
+	}
+	defer m.Disconnect()
+	for _, service := range services {
+		s, err := m.OpenService(service)
+		if err != nil {
+			return errors.Wrapf(err, "failed to open service %s", service)
+		}
+		s.Close()
+	}
 	return nil
 }
 
@@ -467,12 +494,14 @@
 // conditionalMountOnStart is a platform specific helper function during the
 // container start to call mount.
 func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error {
-	// Bail out now for Linux containers
-	if system.LCOWSupported() && container.Platform != "windows" {
+	// Bail out now for Linux containers. We cannot mount the container's filesystem on the
+	// host as it is a non-Windows filesystem.
+	if system.LCOWSupported() && container.OS != "windows" {
 		return nil
 	}
 
-	// We do not mount if a Hyper-V container
+	// We do not mount if this is a Hyper-V container, as it needs to be mounted
+	// inside the utility VM, not the host.
 	if !daemon.runAsHyperVContainer(container.HostConfig) {
 		return daemon.Mount(container)
 	}
@@ -483,7 +512,7 @@
 // during the cleanup of a container to unmount.
 func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error {
 	// Bail out now for Linux containers
-	if system.LCOWSupported() && container.Platform != "windows" {
+	if system.LCOWSupported() && container.OS != "windows" {
 		return nil
 	}
 
@@ -504,7 +533,7 @@
 	}
 
 	// Obtain the stats from HCS via libcontainerd
-	stats, err := daemon.containerd.Stats(c.ID)
+	stats, err := daemon.containerd.Stats(context.Background(), c.ID)
 	if err != nil {
 		if strings.Contains(err.Error(), "container not found") {
 			return nil, containerNotFound(c.ID)
@@ -514,49 +543,48 @@
 
 	// Start with an empty structure
 	s := &types.StatsJSON{}
-
-	// Populate the CPU/processor statistics
-	s.CPUStats = types.CPUStats{
-		CPUUsage: types.CPUUsage{
-			TotalUsage:        stats.Processor.TotalRuntime100ns,
-			UsageInKernelmode: stats.Processor.RuntimeKernel100ns,
-			UsageInUsermode:   stats.Processor.RuntimeKernel100ns,
-		},
-	}
-
-	// Populate the memory statistics
-	s.MemoryStats = types.MemoryStats{
-		Commit:            stats.Memory.UsageCommitBytes,
-		CommitPeak:        stats.Memory.UsageCommitPeakBytes,
-		PrivateWorkingSet: stats.Memory.UsagePrivateWorkingSetBytes,
-	}
-
-	// Populate the storage statistics
-	s.StorageStats = types.StorageStats{
-		ReadCountNormalized:  stats.Storage.ReadCountNormalized,
-		ReadSizeBytes:        stats.Storage.ReadSizeBytes,
-		WriteCountNormalized: stats.Storage.WriteCountNormalized,
-		WriteSizeBytes:       stats.Storage.WriteSizeBytes,
-	}
-
-	// Populate the network statistics
-	s.Networks = make(map[string]types.NetworkStats)
-
-	for _, nstats := range stats.Network {
-		s.Networks[nstats.EndpointId] = types.NetworkStats{
-			RxBytes:   nstats.BytesReceived,
-			RxPackets: nstats.PacketsReceived,
-			RxDropped: nstats.DroppedPacketsIncoming,
-			TxBytes:   nstats.BytesSent,
-			TxPackets: nstats.PacketsSent,
-			TxDropped: nstats.DroppedPacketsOutgoing,
-		}
-	}
-
-	// Set the timestamp
-	s.Stats.Read = stats.Timestamp
+	s.Stats.Read = stats.Read
 	s.Stats.NumProcs = platform.NumProcs()
 
+	if stats.HCSStats != nil {
+		hcss := stats.HCSStats
+		// Populate the CPU/processor statistics
+		s.CPUStats = types.CPUStats{
+			CPUUsage: types.CPUUsage{
+				TotalUsage:        hcss.Processor.TotalRuntime100ns,
+				UsageInKernelmode: hcss.Processor.RuntimeKernel100ns,
+				UsageInUsermode:   hcss.Processor.RuntimeKernel100ns,
+			},
+		}
+
+		// Populate the memory statistics
+		s.MemoryStats = types.MemoryStats{
+			Commit:            hcss.Memory.UsageCommitBytes,
+			CommitPeak:        hcss.Memory.UsageCommitPeakBytes,
+			PrivateWorkingSet: hcss.Memory.UsagePrivateWorkingSetBytes,
+		}
+
+		// Populate the storage statistics
+		s.StorageStats = types.StorageStats{
+			ReadCountNormalized:  hcss.Storage.ReadCountNormalized,
+			ReadSizeBytes:        hcss.Storage.ReadSizeBytes,
+			WriteCountNormalized: hcss.Storage.WriteCountNormalized,
+			WriteSizeBytes:       hcss.Storage.WriteSizeBytes,
+		}
+
+		// Populate the network statistics
+		s.Networks = make(map[string]types.NetworkStats)
+		for _, nstats := range hcss.Network {
+			s.Networks[nstats.EndpointId] = types.NetworkStats{
+				RxBytes:   nstats.BytesReceived,
+				RxPackets: nstats.PacketsReceived,
+				RxDropped: nstats.DroppedPacketsIncoming,
+				TxBytes:   nstats.BytesSent,
+				TxPackets: nstats.PacketsSent,
+				TxDropped: nstats.DroppedPacketsOutgoing,
+			}
+		}
+	}
 	return s, nil
 }
 
@@ -636,3 +664,11 @@
 	}
 	return fileutils.ReadSymlinkedDirectory(path)
 }
+
+func (daemon *Daemon) loadRuntimes() error {
+	return nil
+}
+
+func (daemon *Daemon) initRuntimes(_ map[string]types.Runtime) error {
+	return nil
+}
diff --git a/daemon/daemon_windows_test.go b/daemon/daemon_windows_test.go
new file mode 100644
index 0000000..8b350cf
--- /dev/null
+++ b/daemon/daemon_windows_test.go
@@ -0,0 +1,72 @@
+// +build windows
+
+package daemon
+
+import (
+	"strings"
+	"testing"
+
+	"golang.org/x/sys/windows/svc/mgr"
+)
+
+const existingService = "Power"
+
+func TestEnsureServicesExist(t *testing.T) {
+	m, err := mgr.Connect()
+	if err != nil {
+		t.Fatal("failed to connect to service manager, this test needs admin")
+	}
+	defer m.Disconnect()
+	s, err := m.OpenService(existingService)
+	if err != nil {
+		t.Fatalf("expected to find known inbox service %q, this test needs a known inbox service to run correctly", existingService)
+	}
+	defer s.Close()
+
+	input := []string{existingService}
+	err = ensureServicesInstalled(input)
+	if err != nil {
+		t.Fatalf("unexpected error for input %q: %q", input, err)
+	}
+}
+
+func TestEnsureServicesExistErrors(t *testing.T) {
+	m, err := mgr.Connect()
+	if err != nil {
+		t.Fatal("failed to connect to service manager, this test needs admin")
+	}
+	defer m.Disconnect()
+	s, err := m.OpenService(existingService)
+	if err != nil {
+		t.Fatalf("expected to find known inbox service %q, this test needs a known inbox service to run correctly", existingService)
+	}
+	defer s.Close()
+
+	for _, testcase := range []struct {
+		input         []string
+		expectedError string
+	}{
+		{
+			input:         []string{"daemon_windows_test_fakeservice"},
+			expectedError: "failed to open service daemon_windows_test_fakeservice",
+		},
+		{
+			input:         []string{"daemon_windows_test_fakeservice1", "daemon_windows_test_fakeservice2"},
+			expectedError: "failed to open service daemon_windows_test_fakeservice1",
+		},
+		{
+			input:         []string{existingService, "daemon_windows_test_fakeservice"},
+			expectedError: "failed to open service daemon_windows_test_fakeservice",
+		},
+	} {
+		t.Run(strings.Join(testcase.input, ";"), func(t *testing.T) {
+			err := ensureServicesInstalled(testcase.input)
+			if err == nil {
+				t.Fatalf("expected error for input %v", testcase.input)
+			}
+			if !strings.Contains(err.Error(), testcase.expectedError) {
+				t.Fatalf("expected error %q to contain %q", err.Error(), testcase.expectedError)
+			}
+		})
+	}
+}
diff --git a/daemon/debugtrap_unsupported.go b/daemon/debugtrap_unsupported.go
index f5b9170..6ae9ebf 100644
--- a/daemon/debugtrap_unsupported.go
+++ b/daemon/debugtrap_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!darwin,!freebsd,!windows,!solaris
+// +build !linux,!darwin,!freebsd,!windows
 
 package daemon
 
diff --git a/daemon/debugtrap_windows.go b/daemon/debugtrap_windows.go
index f7d95c0..916bbb4 100644
--- a/daemon/debugtrap_windows.go
+++ b/daemon/debugtrap_windows.go
@@ -15,10 +15,11 @@
 	// Windows does not support signals like *nix systems. So instead of
 	// trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be
 	// signaled. ACL'd to builtin administrators and local system
-	ev, _ := windows.UTF16PtrFromString("Global\\docker-daemon-" + fmt.Sprint(os.Getpid()))
+	event := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid())
+	ev, _ := windows.UTF16PtrFromString(event)
 	sd, err := winio.SddlToSecurityDescriptor("D:P(A;;GA;;;BA)(A;;GA;;;SY)")
 	if err != nil {
-		logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", ev, err.Error())
+		logrus.Errorf("failed to get security descriptor for debug stackdump event %s: %s", event, err.Error())
 		return
 	}
 	var sa windows.SecurityAttributes
@@ -27,11 +28,11 @@
 	sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
 	h, err := windows.CreateEvent(&sa, 0, 0, ev)
 	if h == 0 || err != nil {
-		logrus.Errorf("failed to create debug stackdump event %s: %s", ev, err.Error())
+		logrus.Errorf("failed to create debug stackdump event %s: %s", event, err.Error())
 		return
 	}
 	go func() {
-		logrus.Debugf("Stackdump - waiting signal at %s", ev)
+		logrus.Debugf("Stackdump - waiting signal at %s", event)
 		for {
 			windows.WaitForSingleObject(h, windows.INFINITE)
 			path, err := signal.DumpStacks(root)
diff --git a/daemon/delete.go b/daemon/delete.go
index 99b515d..4d56d14 100644
--- a/daemon/delete.go
+++ b/daemon/delete.go
@@ -11,6 +11,7 @@
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/layer"
 	"github.com/docker/docker/pkg/system"
+	"github.com/docker/docker/volume"
 	volumestore "github.com/docker/docker/volume/store"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -116,15 +117,19 @@
 	// When container creation fails and `RWLayer` has not been created yet, we
 	// do not call `ReleaseRWLayer`
 	if container.RWLayer != nil {
-		metadata, err := daemon.stores[container.Platform].layerStore.ReleaseRWLayer(container.RWLayer)
+		metadata, err := daemon.stores[container.OS].layerStore.ReleaseRWLayer(container.RWLayer)
 		layer.LogReleaseMetadata(metadata)
 		if err != nil && err != layer.ErrMountDoesNotExist && !os.IsNotExist(errors.Cause(err)) {
-			return errors.Wrapf(err, "driver %q failed to remove root filesystem for %s", daemon.GraphDriverName(container.Platform), container.ID)
+			e := errors.Wrapf(err, "driver %q failed to remove root filesystem for %s", daemon.GraphDriverName(container.OS), container.ID)
+			container.SetRemovalError(e)
+			return e
 		}
 	}
 
 	if err := system.EnsureRemoveAll(container.Root); err != nil {
-		return errors.Wrapf(err, "unable to remove filesystem for %s", container.ID)
+		e := errors.Wrapf(err, "unable to remove filesystem for %s", container.ID)
+		container.SetRemovalError(e)
+		return e
 	}
 
 	linkNames := daemon.linkIndex.delete(container)
@@ -140,6 +145,7 @@
 	}
 	container.SetRemoved()
 	stateCtr.del(container.ID)
+
 	daemon.LogContainerEvent(container, "destroy")
 	return nil
 }
@@ -148,10 +154,19 @@
 // If the volume is referenced by a container it is not removed
 // This is called directly from the Engine API
 func (daemon *Daemon) VolumeRm(name string, force bool) error {
-	err := daemon.volumeRm(name)
+	v, err := daemon.volumes.Get(name)
+	if err != nil {
+		if force && volumestore.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+
+	err = daemon.volumeRm(v)
 	if err != nil && volumestore.IsInUse(err) {
 		return stateConflictError{err}
 	}
+
 	if err == nil || force {
 		daemon.volumes.Purge(name)
 		return nil
@@ -159,12 +174,7 @@
 	return err
 }
 
-func (daemon *Daemon) volumeRm(name string) error {
-	v, err := daemon.volumes.Get(name)
-	if err != nil {
-		return err
-	}
-
+func (daemon *Daemon) volumeRm(v volume.Volume) error {
 	if err := daemon.volumes.Remove(v); err != nil {
 		return errors.Wrap(err, "unable to remove volume")
 	}
diff --git a/daemon/discovery/discovery_test.go b/daemon/discovery/discovery_test.go
index f084a64..d5c9966 100644
--- a/daemon/discovery/discovery_test.go
+++ b/daemon/discovery/discovery_test.go
@@ -4,92 +4,77 @@
 	"fmt"
 	"testing"
 	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
+func TestDiscoveryOptsErrors(t *testing.T) {
+	var testcases = []struct {
+		doc  string
+		opts map[string]string
+	}{
+		{
+			doc:  "discovery.ttl < discovery.heartbeat",
+			opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"},
+		},
+		{
+			doc:  "discovery.ttl == discovery.heartbeat",
+			opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"},
+		},
+		{
+			doc:  "negative discovery.heartbeat",
+			opts: map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"},
+		},
+		{
+			doc:  "negative discovery.ttl",
+			opts: map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"},
+		},
+		{
+			doc:  "invalid discovery.heartbeat",
+			opts: map[string]string{"discovery.heartbeat": "invalid"},
+		},
+		{
+			doc:  "invalid discovery.ttl",
+			opts: map[string]string{"discovery.ttl": "invalid"},
+		},
+	}
+
+	for _, testcase := range testcases {
+		_, _, err := discoveryOpts(testcase.opts)
+		assert.Error(t, err, testcase.doc)
+	}
+}
+
 func TestDiscoveryOpts(t *testing.T) {
-	clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"}
+	clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"}
 	heartbeat, ttl, err := discoveryOpts(clusterOpts)
-	if err == nil {
-		t.Fatal("discovery.ttl < discovery.heartbeat must fail")
-	}
-
-	clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"}
-	heartbeat, ttl, err = discoveryOpts(clusterOpts)
-	if err == nil {
-		t.Fatal("discovery.ttl == discovery.heartbeat must fail")
-	}
-
-	clusterOpts = map[string]string{"discovery.heartbeat": "-10", "discovery.ttl": "10"}
-	heartbeat, ttl, err = discoveryOpts(clusterOpts)
-	if err == nil {
-		t.Fatal("negative discovery.heartbeat must fail")
-	}
-
-	clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "-10"}
-	heartbeat, ttl, err = discoveryOpts(clusterOpts)
-	if err == nil {
-		t.Fatal("negative discovery.ttl must fail")
-	}
-
-	clusterOpts = map[string]string{"discovery.heartbeat": "invalid"}
-	heartbeat, ttl, err = discoveryOpts(clusterOpts)
-	if err == nil {
-		t.Fatal("invalid discovery.heartbeat must fail")
-	}
-
-	clusterOpts = map[string]string{"discovery.ttl": "invalid"}
-	heartbeat, ttl, err = discoveryOpts(clusterOpts)
-	if err == nil {
-		t.Fatal("invalid discovery.ttl must fail")
-	}
-
-	clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"}
-	heartbeat, ttl, err = discoveryOpts(clusterOpts)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if heartbeat != 10*time.Second {
-		t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat)
-	}
-
-	if ttl != 20*time.Second {
-		t.Fatalf("TTL - Expected : %v, Actual : %v", 20*time.Second, ttl)
-	}
+	require.NoError(t, err)
+	assert.Equal(t, 10*time.Second, heartbeat)
+	assert.Equal(t, 20*time.Second, ttl)
 
 	clusterOpts = map[string]string{"discovery.heartbeat": "10"}
 	heartbeat, ttl, err = discoveryOpts(clusterOpts)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if heartbeat != 10*time.Second {
-		t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat)
-	}
-
-	expected := 10 * defaultDiscoveryTTLFactor * time.Second
-	if ttl != expected {
-		t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl)
-	}
+	require.NoError(t, err)
+	assert.Equal(t, 10*time.Second, heartbeat)
+	assert.Equal(t, 10*defaultDiscoveryTTLFactor*time.Second, ttl)
 
 	clusterOpts = map[string]string{"discovery.ttl": "30"}
 	heartbeat, ttl, err = discoveryOpts(clusterOpts)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	if ttl != 30*time.Second {
 		t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl)
 	}
 
-	expected = 30 * time.Second / defaultDiscoveryTTLFactor
+	expected := 30 * time.Second / defaultDiscoveryTTLFactor
 	if heartbeat != expected {
 		t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat)
 	}
 
 	discoveryTTL := fmt.Sprintf("%d", defaultDiscoveryTTLFactor-1)
 	clusterOpts = map[string]string{"discovery.ttl": discoveryTTL}
-	heartbeat, ttl, err = discoveryOpts(clusterOpts)
+	heartbeat, _, err = discoveryOpts(clusterOpts)
 	if err == nil && heartbeat == 0 {
 		t.Fatal("discovery.heartbeat must be positive")
 	}
diff --git a/daemon/disk_usage.go b/daemon/disk_usage.go
index a28463e..7142ef0 100644
--- a/daemon/disk_usage.go
+++ b/daemon/disk_usage.go
@@ -99,7 +99,6 @@
 	for platform := range daemon.stores {
 		layerRefs := daemon.getLayerRefs(platform)
 		allLayers := daemon.stores[platform].layerStore.Map()
-		var allLayersSize int64
 		for _, l := range allLayers {
 			select {
 			case <-ctx.Done():
diff --git a/daemon/errors.go b/daemon/errors.go
index cd8de4d..889261f 100644
--- a/daemon/errors.go
+++ b/daemon/errors.go
@@ -64,6 +64,22 @@
 	return stateConflictError{cause}
 }
 
+func errNotPaused(id string) error {
+	cause := errors.Errorf("Container %s is already paused", id)
+	return stateConflictError{cause}
+}
+
+type nameConflictError struct {
+	id   string
+	name string
+}
+
+func (e nameConflictError) Error() string {
+	return fmt.Sprintf("Conflict. The container name %q is already in use by container %q. You have to remove (or rename) that container to be able to reuse that name.", e.name, e.id)
+}
+
+func (nameConflictError) Conflict() {}
+
 type validationError struct {
 	cause error
 }
diff --git a/daemon/events/filter.go b/daemon/events/filter.go
index 645f1ca..61e3459 100644
--- a/daemon/events/filter.go
+++ b/daemon/events/filter.go
@@ -27,6 +27,10 @@
 		ef.matchVolume(ev) &&
 		ef.matchNetwork(ev) &&
 		ef.matchImage(ev) &&
+		ef.matchNode(ev) &&
+		ef.matchService(ev) &&
+		ef.matchSecret(ev) &&
+		ef.matchConfig(ev) &&
 		ef.matchLabels(ev.Actor.Attributes)
 }
 
@@ -49,14 +53,14 @@
 }
 
 func (ef *Filter) matchScope(scope string) bool {
-	if !ef.filter.Include("scope") {
+	if !ef.filter.Contains("scope") {
 		return true
 	}
 	return ef.filter.ExactMatch("scope", scope)
 }
 
 func (ef *Filter) matchLabels(attributes map[string]string) bool {
-	if !ef.filter.Include("label") {
+	if !ef.filter.Contains("label") {
 		return true
 	}
 	return ef.filter.MatchKVList("label", attributes)
diff --git a/daemon/exec.go b/daemon/exec.go
index 9b3e583..afdfc9c 100644
--- a/daemon/exec.go
+++ b/daemon/exec.go
@@ -13,10 +13,10 @@
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/container/stream"
 	"github.com/docker/docker/daemon/exec"
-	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/pkg/pools"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/docker/docker/pkg/term"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -31,6 +31,14 @@
 	d.execCommands.Add(config.ID, config)
 }
 
+func (d *Daemon) registerExecPidUnlocked(container *container.Container, config *exec.Config) {
+	logrus.Debugf("registering pid %v for exec %v", config.Pid, config.ID)
+	// Storing execs in container in order to kill them gracefully whenever the container is stopped or removed.
+	container.ExecCommands.SetPidUnlocked(config.ID, config.Pid)
+	// Storing execs in daemon for easy access via Engine API.
+	d.execCommands.SetPidUnlocked(config.ID, config.Pid)
+}
+
 // ExecExists looks up the exec instance and returns a bool if it exists or not.
 // It will also return the error produced by `getConfig`
 func (d *Daemon) ExecExists(name string) (bool, error) {
@@ -70,8 +78,8 @@
 }
 
 func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) {
-	container.ExecCommands.Delete(execConfig.ID)
-	d.execCommands.Delete(execConfig.ID)
+	container.ExecCommands.Delete(execConfig.ID, execConfig.Pid)
+	d.execCommands.Delete(execConfig.ID, execConfig.Pid)
 }
 
 func (d *Daemon) getActiveContainer(name string) (*container.Container, error) {
@@ -181,7 +189,7 @@
 				logrus.Errorf("failed to cleanup exec %s streams: %s", c.ID, err)
 			}
 			ec.Unlock()
-			c.ExecCommands.Delete(ec.ID)
+			c.ExecCommands.Delete(ec.ID, ec.Pid)
 		}
 	}()
 
@@ -207,13 +215,17 @@
 		ec.StreamConfig.NewNopInputPipe()
 	}
 
-	p := libcontainerd.Process{
+	p := &specs.Process{
 		Args:     append([]string{ec.Entrypoint}, ec.Args...),
 		Env:      ec.Env,
 		Terminal: ec.Tty,
+		Cwd:      c.Config.WorkingDir,
+	}
+	if p.Cwd == "" {
+		p.Cwd = "/"
 	}
 
-	if err := execSetPlatformOpt(c, ec, &p); err != nil {
+	if err := d.execSetPlatformOpt(c, ec, p); err != nil {
 		return err
 	}
 
@@ -231,22 +243,28 @@
 	ec.StreamConfig.AttachStreams(&attachConfig)
 	attachErr := ec.StreamConfig.CopyStreams(ctx, &attachConfig)
 
-	systemPid, err := d.containerd.AddProcess(ctx, c.ID, name, p, ec.InitializeStdio)
+	// Synchronize with libcontainerd event loop
+	ec.Lock()
+	c.ExecCommands.Lock()
+	systemPid, err := d.containerd.Exec(ctx, c.ID, ec.ID, p, cStdin != nil, ec.InitializeStdio)
 	if err != nil {
+		c.ExecCommands.Unlock()
+		ec.Unlock()
 		return translateContainerdStartErr(ec.Entrypoint, ec.SetExitCode, err)
 	}
-	ec.Lock()
 	ec.Pid = systemPid
+	d.registerExecPidUnlocked(c, ec)
+	c.ExecCommands.Unlock()
 	ec.Unlock()
 
 	select {
 	case <-ctx.Done():
 		logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID)
-		d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["TERM"]))
+		d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["TERM"]))
 		select {
 		case <-time.After(termProcessTimeout * time.Second):
 			logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout)
-			d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["KILL"]))
+			d.containerd.SignalProcess(ctx, c.ID, name, int(signal.SignalMap["KILL"]))
 		case <-attachErr:
 			// TERM signal worked
 		}
@@ -273,7 +291,7 @@
 		for id, config := range d.execCommands.Commands() {
 			if config.CanRemove {
 				cleaned++
-				d.execCommands.Delete(id)
+				d.execCommands.Delete(id, config.Pid)
 			} else {
 				if _, exists := liveExecCommands[id]; !exists {
 					config.CanRemove = true
diff --git a/daemon/exec/exec.go b/daemon/exec/exec.go
index 2348ad8..7aa2383 100644
--- a/daemon/exec/exec.go
+++ b/daemon/exec/exec.go
@@ -4,6 +4,7 @@
 	"runtime"
 	"sync"
 
+	"github.com/containerd/containerd"
 	"github.com/docker/docker/container/stream"
 	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/pkg/stringid"
@@ -42,8 +43,26 @@
 	}
 }
 
+type cio struct {
+	containerd.IO
+
+	sc *stream.Config
+}
+
+func (i *cio) Close() error {
+	i.IO.Close()
+
+	return i.sc.CloseStreams()
+}
+
+func (i *cio) Wait() {
+	i.sc.Wait()
+
+	i.IO.Wait()
+}
+
 // InitializeStdio is called by libcontainerd to connect the stdio.
-func (c *Config) InitializeStdio(iop libcontainerd.IOPipe) error {
+func (c *Config) InitializeStdio(iop *libcontainerd.IOPipe) (containerd.IO, error) {
 	c.StreamConfig.CopyToPipe(iop)
 
 	if c.StreamConfig.Stdin() == nil && !c.Tty && runtime.GOOS == "windows" {
@@ -54,7 +73,7 @@
 		}
 	}
 
-	return nil
+	return &cio{IO: iop, sc: c.StreamConfig}, nil
 }
 
 // CloseStreams closes the stdio streams for the exec
@@ -69,45 +88,66 @@
 
 // Store keeps track of the exec configurations.
 type Store struct {
-	commands map[string]*Config
+	byID  map[string]*Config
+	byPid map[int]*Config
 	sync.RWMutex
 }
 
 // NewStore initializes a new exec store.
 func NewStore() *Store {
-	return &Store{commands: make(map[string]*Config, 0)}
+	return &Store{
+		byID:  make(map[string]*Config),
+		byPid: make(map[int]*Config),
+	}
 }
 
 // Commands returns the exec configurations in the store.
 func (e *Store) Commands() map[string]*Config {
 	e.RLock()
-	commands := make(map[string]*Config, len(e.commands))
-	for id, config := range e.commands {
-		commands[id] = config
+	byID := make(map[string]*Config, len(e.byID))
+	for id, config := range e.byID {
+		byID[id] = config
 	}
 	e.RUnlock()
-	return commands
+	return byID
 }
 
 // Add adds a new exec configuration to the store.
 func (e *Store) Add(id string, Config *Config) {
 	e.Lock()
-	e.commands[id] = Config
+	e.byID[id] = Config
 	e.Unlock()
 }
 
+// SetPidUnlocked adds an association between a Pid and a config; it does not
+// synchronize with other operations.
+func (e *Store) SetPidUnlocked(id string, pid int) {
+	if config, ok := e.byID[id]; ok {
+		e.byPid[pid] = config
+	}
+}
+
 // Get returns an exec configuration by its id.
 func (e *Store) Get(id string) *Config {
 	e.RLock()
-	res := e.commands[id]
+	res := e.byID[id]
+	e.RUnlock()
+	return res
+}
+
+// ByPid returns an exec configuration by its pid.
+func (e *Store) ByPid(pid int) *Config {
+	e.RLock()
+	res := e.byPid[pid]
 	e.RUnlock()
 	return res
 }
 
 // Delete removes an exec configuration from the store.
-func (e *Store) Delete(id string) {
+func (e *Store) Delete(id string, pid int) {
 	e.Lock()
-	delete(e.commands, id)
+	delete(e.byPid, pid)
+	delete(e.byID, id)
 	e.Unlock()
 }
 
@@ -115,7 +155,7 @@
 func (e *Store) List() []string {
 	var IDs []string
 	e.RLock()
-	for id := range e.commands {
+	for id := range e.byID {
 		IDs = append(IDs, id)
 	}
 	e.RUnlock()
diff --git a/daemon/exec_linux.go b/daemon/exec_linux.go
index bb11c11..525ce01 100644
--- a/daemon/exec_linux.go
+++ b/daemon/exec_linux.go
@@ -4,25 +4,30 @@
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon/caps"
 	"github.com/docker/docker/daemon/exec"
-	"github.com/docker/docker/libcontainerd"
 	"github.com/opencontainers/runc/libcontainer/apparmor"
 	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
-func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error {
+func (daemon *Daemon) execSetPlatformOpt(c *container.Container, ec *exec.Config, p *specs.Process) error {
 	if len(ec.User) > 0 {
 		uid, gid, additionalGids, err := getUser(c, ec.User)
 		if err != nil {
 			return err
 		}
-		p.User = &specs.User{
+		p.User = specs.User{
 			UID:            uid,
 			GID:            gid,
 			AdditionalGids: additionalGids,
 		}
 	}
 	if ec.Privileged {
-		p.Capabilities = caps.GetAllCapabilities()
+		if p.Capabilities == nil {
+			p.Capabilities = &specs.LinuxCapabilities{}
+		}
+		p.Capabilities.Bounding = caps.GetAllCapabilities()
+		p.Capabilities.Permitted = p.Capabilities.Bounding
+		p.Capabilities.Inheritable = p.Capabilities.Bounding
+		p.Capabilities.Effective = p.Capabilities.Bounding
 	}
 	if apparmor.IsEnabled() {
 		var appArmorProfile string
@@ -46,5 +51,6 @@
 			}
 		}
 	}
+	daemon.setRlimits(&specs.Spec{Process: p}, c)
 	return nil
 }
diff --git a/daemon/exec_solaris.go b/daemon/exec_solaris.go
index 7003355..7c1fc20 100644
--- a/daemon/exec_solaris.go
+++ b/daemon/exec_solaris.go
@@ -3,9 +3,9 @@
 import (
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon/exec"
-	"github.com/docker/docker/libcontainerd"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
-func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error {
+func (daemon *Daemon) execSetPlatformOpt(_ *container.Container, _ *exec.Config, _ *specs.Process) error {
 	return nil
 }
diff --git a/daemon/exec_windows.go b/daemon/exec_windows.go
index b7b4514..d8754eb 100644
--- a/daemon/exec_windows.go
+++ b/daemon/exec_windows.go
@@ -3,12 +3,12 @@
 import (
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/daemon/exec"
-	"github.com/docker/docker/libcontainerd"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
-func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error {
+func (daemon *Daemon) execSetPlatformOpt(c *container.Container, ec *exec.Config, p *specs.Process) error {
 	// Process arguments need to be escaped before sending to OCI.
-	if c.Platform == "windows" {
+	if c.OS == "windows" {
 		p.Args = escapeArgs(p.Args)
 		p.User.Username = ec.User
 	}
diff --git a/daemon/export.go b/daemon/export.go
index 081e163..c0e6aff 100644
--- a/daemon/export.go
+++ b/daemon/export.go
@@ -18,8 +18,18 @@
 		return err
 	}
 
-	if runtime.GOOS == "windows" && container.Platform == "windows" {
-		return fmt.Errorf("the daemon on this platform does not support exporting Windows containers")
+	if runtime.GOOS == "windows" && container.OS == "windows" {
+		return fmt.Errorf("the daemon on this operating system does not support exporting Windows containers")
+	}
+
+	if container.IsDead() {
+		err := fmt.Errorf("You cannot export container %s which is Dead", container.ID)
+		return stateConflictError{err}
+	}
+
+	if container.IsRemovalInProgress() {
+		err := fmt.Errorf("You cannot export container %s which is being removed", container.ID)
+		return stateConflictError{err}
 	}
 
 	data, err := daemon.containerExport(container)
@@ -35,23 +45,35 @@
 	return nil
 }
 
-func (daemon *Daemon) containerExport(container *container.Container) (io.ReadCloser, error) {
-	if err := daemon.Mount(container); err != nil {
+func (daemon *Daemon) containerExport(container *container.Container) (arch io.ReadCloser, err error) {
+	rwlayer, err := daemon.stores[container.OS].layerStore.GetRWLayer(container.ID)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err != nil {
+			daemon.stores[container.OS].layerStore.ReleaseRWLayer(rwlayer)
+		}
+	}()
+
+	_, err = rwlayer.Mount(container.GetMountLabel())
+	if err != nil {
 		return nil, err
 	}
 
-	archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{
+	archive, err := archivePath(container.BaseFS, container.BaseFS.Path(), &archive.TarOptions{
 		Compression: archive.Uncompressed,
 		UIDMaps:     daemon.idMappings.UIDs(),
 		GIDMaps:     daemon.idMappings.GIDs(),
 	})
 	if err != nil {
-		daemon.Unmount(container)
+		rwlayer.Unmount()
 		return nil, err
 	}
-	arch := ioutils.NewReadCloserWrapper(archive, func() error {
+	arch = ioutils.NewReadCloserWrapper(archive, func() error {
 		err := archive.Close()
-		daemon.Unmount(container)
+		rwlayer.Unmount()
+		daemon.stores[container.OS].layerStore.ReleaseRWLayer(rwlayer)
 		return err
 	})
 	daemon.LogContainerEvent(container, "export")
diff --git a/daemon/getsize_unix.go b/daemon/getsize_unix.go
index e47e646..fff90f2 100644
--- a/daemon/getsize_unix.go
+++ b/daemon/getsize_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris
+// +build linux freebsd
 
 package daemon
 
diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go
index 05822a7..8313263 100644
--- a/daemon/graphdriver/aufs/aufs.go
+++ b/daemon/graphdriver/aufs/aufs.go
@@ -38,6 +38,7 @@
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/directory"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/locker"
@@ -388,12 +389,12 @@
 
 // Get returns the rootfs path for the id.
 // This will mount the dir at its given path
-func (a *Driver) Get(id, mountLabel string) (string, error) {
+func (a *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
 	a.locker.Lock(id)
 	defer a.locker.Unlock(id)
 	parents, err := a.getParentLayerPaths(id)
 	if err != nil && !os.IsNotExist(err) {
-		return "", err
+		return nil, err
 	}
 
 	a.pathCacheLock.Lock()
@@ -407,21 +408,21 @@
 		}
 	}
 	if count := a.ctr.Increment(m); count > 1 {
-		return m, nil
+		return containerfs.NewLocalContainerFS(m), nil
 	}
 
 	// If a dir does not have a parent ( no layers )do not try to mount
 	// just return the diff path to the data
 	if len(parents) > 0 {
 		if err := a.mount(id, m, mountLabel, parents); err != nil {
-			return "", err
+			return nil, err
 		}
 	}
 
 	a.pathCacheLock.Lock()
 	a.pathCache[id] = m
 	a.pathCacheLock.Unlock()
-	return m, nil
+	return containerfs.NewLocalContainerFS(m), nil
 }
 
 // Put unmounts and updates list of active mounts.
diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go
index 532074e..a58f18a 100644
--- a/daemon/graphdriver/aufs/aufs_test.go
+++ b/daemon/graphdriver/aufs/aufs_test.go
@@ -9,15 +9,16 @@
 	"io/ioutil"
 	"os"
 	"path"
+	"path/filepath"
 	"sync"
 	"testing"
 
-	"path/filepath"
-
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/reexec"
 	"github.com/docker/docker/pkg/stringid"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 var (
@@ -41,6 +42,14 @@
 	return d
 }
 
+func driverGet(d *Driver, id string, mntLabel string) (string, error) {
+	mnt, err := d.Get(id, mntLabel)
+	if err != nil {
+		return "", err
+	}
+	return mnt.Path(), nil
+}
+
 func newDriver(t testing.TB) *Driver {
 	if err := os.MkdirAll(tmp, 0755); err != nil {
 		t.Fatal(err)
@@ -170,7 +179,7 @@
 		t.Fatal(err)
 	}
 	expected := path.Join(tmp, "diff", "1")
-	if diffPath != expected {
+	if diffPath.Path() != expected {
 		t.Fatalf("Expected path %s got %s", expected, diffPath)
 	}
 }
@@ -179,9 +188,8 @@
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Cleanup(); err != nil {
-		t.Fatal(err)
-	}
+	err := d.Cleanup()
+	assert.NoError(t, err)
 }
 
 func TestCleanupWithDir(t *testing.T) {
@@ -201,18 +209,12 @@
 	d := newDriver(t)
 	defer os.RemoveAll(tmp)
 
-	if err := d.Create("1", "", nil); err != nil {
-		t.Fatal(err)
-	}
+	err := d.Create("1", "", nil)
+	require.NoError(t, err)
 
 	response, err := d.mounted(d.getDiffPath("1"))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if response != false {
-		t.Fatal("Response if dir id 1 is mounted should be false")
-	}
+	require.NoError(t, err)
+	assert.False(t, response)
 }
 
 func TestMountedTrueResponse(t *testing.T) {
@@ -220,26 +222,17 @@
 	defer os.RemoveAll(tmp)
 	defer d.Cleanup()
 
-	if err := d.Create("1", "", nil); err != nil {
-		t.Fatal(err)
-	}
-	if err := d.Create("2", "1", nil); err != nil {
-		t.Fatal(err)
-	}
+	err := d.Create("1", "", nil)
+	require.NoError(t, err)
+	err = d.Create("2", "1", nil)
+	require.NoError(t, err)
 
-	_, err := d.Get("2", "")
-	if err != nil {
-		t.Fatal(err)
-	}
+	_, err = d.Get("2", "")
+	require.NoError(t, err)
 
 	response, err := d.mounted(d.pathCache["2"])
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if response != true {
-		t.Fatal("Response if dir id 2 is mounted should be true")
-	}
+	require.NoError(t, err)
+	assert.True(t, response)
 }
 
 func TestMountWithParent(t *testing.T) {
@@ -263,13 +256,13 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	if mntPath == "" {
-		t.Fatal("mntPath should not be empty string")
+	if mntPath == nil {
+		t.Fatal("mntPath should not be nil")
 	}
 
 	expected := path.Join(tmp, "mnt", "2")
-	if mntPath != expected {
-		t.Fatalf("Expected %s got %s", expected, mntPath)
+	if mntPath.Path() != expected {
+		t.Fatalf("Expected %s got %s", expected, mntPath.Path())
 	}
 }
 
@@ -294,8 +287,8 @@
 	if err != nil {
 		t.Fatal(err)
 	}
-	if mntPath == "" {
-		t.Fatal("mntPath should not be empty string")
+	if mntPath == nil {
+		t.Fatal("mntPath should not be nil")
 	}
 
 	mounted, err := d.mounted(d.pathCache["2"])
@@ -329,7 +322,7 @@
 		t.Fatal(err)
 	}
 
-	diffPath, err := d.Get("1", "")
+	diffPath, err := driverGet(d, "1", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -373,7 +366,7 @@
 		}
 	}()
 
-	mntPoint, err := d.Get("2", "")
+	mntPoint, err := driverGet(d, "2", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -412,7 +405,7 @@
 	if err := d.CreateReadWrite("3", "2", nil); err != nil {
 		t.Fatal(err)
 	}
-	mntPoint, err = d.Get("3", "")
+	mntPoint, err = driverGet(d, "3", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -458,7 +451,7 @@
 		t.Fatal(err)
 	}
 
-	diffPath, err := d.Get("1", "")
+	diffPath, err := driverGet(d, "1", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -500,7 +493,7 @@
 		t.Fatal(err)
 	}
 
-	diffPath, err := d.Get("1", "")
+	diffPath, err := driverGet(d, "1", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -574,9 +567,8 @@
 	}
 
 	status := d.Status()
-	if status == nil || len(status) == 0 {
-		t.Fatal("Status should not be nil or empty")
-	}
+	assert.Len(t, status, 4)
+
 	rootDir := status[0]
 	dirs := status[2]
 	if rootDir[0] != "Root Dir" {
@@ -602,7 +594,7 @@
 		t.Fatal(err)
 	}
 
-	diffPath, err := d.Get("1", "")
+	diffPath, err := driverGet(d, "1", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -637,7 +629,7 @@
 
 	// Ensure that the file is in the mount point for id 3
 
-	mountPoint, err := d.Get("3", "")
+	mountPoint, err := driverGet(d, "3", "")
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -677,48 +669,34 @@
 		}
 		current = hash(current)
 
-		if err := d.CreateReadWrite(current, parent, nil); err != nil {
-			t.Logf("Current layer %d", i)
-			t.Error(err)
-		}
-		point, err := d.Get(current, "")
-		if err != nil {
-			t.Logf("Current layer %d", i)
-			t.Error(err)
-		}
+		err := d.CreateReadWrite(current, parent, nil)
+		require.NoError(t, err, "current layer %d", i)
+
+		point, err := driverGet(d, current, "")
+		require.NoError(t, err, "current layer %d", i)
+
 		f, err := os.Create(path.Join(point, current))
-		if err != nil {
-			t.Logf("Current layer %d", i)
-			t.Error(err)
-		}
+		require.NoError(t, err, "current layer %d", i)
 		f.Close()
 
 		if i%10 == 0 {
-			if err := os.Remove(path.Join(point, parent)); err != nil {
-				t.Logf("Current layer %d", i)
-				t.Error(err)
-			}
+			err := os.Remove(path.Join(point, parent))
+			require.NoError(t, err, "current layer %d", i)
 			expected--
 		}
 		last = current
 	}
 
 	// Perform the actual mount for the top most image
-	point, err := d.Get(last, "")
-	if err != nil {
-		t.Error(err)
-	}
+	point, err := driverGet(d, last, "")
+	require.NoError(t, err)
 	files, err := ioutil.ReadDir(point)
-	if err != nil {
-		t.Error(err)
-	}
-	if len(files) != expected {
-		t.Errorf("Expected %d got %d", expected, len(files))
-	}
+	require.NoError(t, err)
+	assert.Len(t, files, expected)
 }
 
 func TestMountMoreThan42Layers(t *testing.T) {
-	os.RemoveAll(tmpOuter)
+	defer os.RemoveAll(tmpOuter)
 	testMountMoreThan42Layers(t, tmp)
 }
 
diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go
index dcdfc9a..5a61217 100644
--- a/daemon/graphdriver/btrfs/btrfs.go
+++ b/daemon/graphdriver/btrfs/btrfs.go
@@ -27,6 +27,7 @@
 	"unsafe"
 
 	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/parsers"
@@ -631,29 +632,29 @@
 }
 
 // Get the requested filesystem id.
-func (d *Driver) Get(id, mountLabel string) (string, error) {
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
 	dir := d.subvolumesDirID(id)
 	st, err := os.Stat(dir)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
 
 	if !st.IsDir() {
-		return "", fmt.Errorf("%s: not a directory", dir)
+		return nil, fmt.Errorf("%s: not a directory", dir)
 	}
 
 	if quota, err := ioutil.ReadFile(d.quotasDirID(id)); err == nil {
 		if size, err := strconv.ParseUint(string(quota), 10, 64); err == nil && size >= d.options.minSpace {
 			if err := d.subvolEnableQuota(); err != nil {
-				return "", err
+				return nil, err
 			}
 			if err := subvolLimitQgroup(dir, size); err != nil {
-				return "", err
+				return nil, err
 			}
 		}
 	}
 
-	return dir, nil
+	return containerfs.NewLocalContainerFS(dir), nil
 }
 
 // Put is not implemented for BTRFS as there is no cleanup required for the id.
diff --git a/daemon/graphdriver/btrfs/btrfs_test.go b/daemon/graphdriver/btrfs/btrfs_test.go
index 0038dbc..056b99b 100644
--- a/daemon/graphdriver/btrfs/btrfs_test.go
+++ b/daemon/graphdriver/btrfs/btrfs_test.go
@@ -35,12 +35,14 @@
 	}
 	defer graphtest.PutDriver(t)
 
-	dir, err := d.Get("test", "")
+	dirFS, err := d.Get("test", "")
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer d.Put("test")
 
+	dir := dirFS.Path()
+
 	if err := subvolCreate(dir, "subvoltest"); err != nil {
 		t.Fatal(err)
 	}
diff --git a/daemon/graphdriver/copy/copy.go b/daemon/graphdriver/copy/copy.go
new file mode 100644
index 0000000..8ec458d
--- /dev/null
+++ b/daemon/graphdriver/copy/copy.go
@@ -0,0 +1,234 @@
+// +build linux
+
+package copy
+
+/*
+#include <linux/fs.h>
+
+#ifndef FICLONE
+#define FICLONE		_IOW(0x94, 9, int)
+#endif
+*/
+import "C"
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/pools"
+	"github.com/docker/docker/pkg/system"
+	rsystem "github.com/opencontainers/runc/libcontainer/system"
+	"golang.org/x/sys/unix"
+)
+
+// Mode indicates whether to use hardlink or copy content
+type Mode int
+
+const (
+	// Content creates a new file, and copies the content of the file
+	Content Mode = iota
+	// Hardlink creates a new hardlink to the existing file
+	Hardlink
+)
+
+func copyRegular(srcPath, dstPath string, fileinfo os.FileInfo, copyWithFileRange, copyWithFileClone *bool) error {
+	srcFile, err := os.Open(srcPath)
+	if err != nil {
+		return err
+	}
+	defer srcFile.Close()
+
+	// If the destination file already exists, we shouldn't blow it away
+	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, fileinfo.Mode())
+	if err != nil {
+		return err
+	}
+	defer dstFile.Close()
+
+	if *copyWithFileClone {
+		_, _, err = unix.Syscall(unix.SYS_IOCTL, dstFile.Fd(), C.FICLONE, srcFile.Fd())
+		if err == nil {
+			return nil
+		}
+
+		*copyWithFileClone = false
+		if err == unix.EXDEV {
+			*copyWithFileRange = false
+		}
+	}
+	if *copyWithFileRange {
+		err = doCopyWithFileRange(srcFile, dstFile, fileinfo)
+		// Trying the file_clone may not have caught the exdev case
+		// as the ioctl may not have been available (therefore EINVAL)
+		if err == unix.EXDEV || err == unix.ENOSYS {
+			*copyWithFileRange = false
+		} else if err != nil {
+			return err
+		}
+	}
+	return legacyCopy(srcFile, dstFile)
+}
+
+func doCopyWithFileRange(srcFile, dstFile *os.File, fileinfo os.FileInfo) error {
+	amountLeftToCopy := fileinfo.Size()
+
+	for amountLeftToCopy > 0 {
+		n, err := unix.CopyFileRange(int(srcFile.Fd()), nil, int(dstFile.Fd()), nil, int(amountLeftToCopy), 0)
+		if err != nil {
+			return err
+		}
+
+		amountLeftToCopy = amountLeftToCopy - int64(n)
+	}
+
+	return nil
+}
+
+func legacyCopy(srcFile io.Reader, dstFile io.Writer) error {
+	_, err := pools.Copy(dstFile, srcFile)
+
+	return err
+}
+
+func copyXattr(srcPath, dstPath, attr string) error {
+	data, err := system.Lgetxattr(srcPath, attr)
+	if err != nil {
+		return err
+	}
+	if data != nil {
+		if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// DirCopy copies or hardlinks the contents of one directory to another,
+// properly handling xattrs, and soft links
+func DirCopy(srcDir, dstDir string, copyMode Mode) error {
+	copyWithFileRange := true
+	copyWithFileClone := true
+	err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		relPath, err := filepath.Rel(srcDir, srcPath)
+		if err != nil {
+			return err
+		}
+
+		dstPath := filepath.Join(dstDir, relPath)
+		if err != nil {
+			return err
+		}
+
+		stat, ok := f.Sys().(*syscall.Stat_t)
+		if !ok {
+			return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
+		}
+
+		isHardlink := false
+
+		switch f.Mode() & os.ModeType {
+		case 0: // Regular file
+			if copyMode == Hardlink {
+				isHardlink = true
+				if err2 := os.Link(srcPath, dstPath); err2 != nil {
+					return err2
+				}
+			} else {
+				if err2 := copyRegular(srcPath, dstPath, f, &copyWithFileRange, &copyWithFileClone); err2 != nil {
+					return err2
+				}
+			}
+
+		case os.ModeDir:
+			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
+				return err
+			}
+
+		case os.ModeSymlink:
+			link, err := os.Readlink(srcPath)
+			if err != nil {
+				return err
+			}
+
+			if err := os.Symlink(link, dstPath); err != nil {
+				return err
+			}
+
+		case os.ModeNamedPipe:
+			fallthrough
+		case os.ModeSocket:
+			if rsystem.RunningInUserNS() {
+				// cannot create a device if running in user namespace
+				return nil
+			}
+			if err := unix.Mkfifo(dstPath, stat.Mode); err != nil {
+				return err
+			}
+
+		case os.ModeDevice:
+			if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
+				return err
+			}
+
+		default:
+			return fmt.Errorf("unknown file type for %s", srcPath)
+		}
+
+		// Everything below is copying metadata from src to dst. All this metadata
+		// already shares an inode for hardlinks.
+		if isHardlink {
+			return nil
+		}
+
+		if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
+			return err
+		}
+
+		if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
+			return err
+		}
+
+		// We need to copy this attribute if it appears in an overlay upper layer, as
+		// this function is used to copy those. It is set by overlay if a directory
+		// is removed and then re-created and should not inherit anything from the
+		// same dir in the lower dir.
+		if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
+			return err
+		}
+
+		isSymlink := f.Mode()&os.ModeSymlink != 0
+
+		// There is no LChmod, so ignore mode for symlink. Also, this
+		// must happen after chown, as that can modify the file mode
+		if !isSymlink {
+			if err := os.Chmod(dstPath, f.Mode()); err != nil {
+				return err
+			}
+		}
+
+		// system.Chtimes doesn't support a NOFOLLOW flag atm
+		// nolint: unconvert
+		if !isSymlink {
+			aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
+			mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
+			if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
+				return err
+			}
+		} else {
+			ts := []syscall.Timespec{stat.Atim, stat.Mtim}
+			if err := system.LUtimesNano(dstPath, ts); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+	return err
+}
diff --git a/daemon/graphdriver/copy/copy_test.go b/daemon/graphdriver/copy/copy_test.go
new file mode 100644
index 0000000..6976503
--- /dev/null
+++ b/daemon/graphdriver/copy/copy_test.go
@@ -0,0 +1,67 @@
+// +build linux
+
+package copy
+
+import (
+	"io/ioutil"
+	"math/rand"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/docker/docker/pkg/parsers/kernel"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestIsCopyFileRangeSyscallAvailable(t *testing.T) {
+	// Verifies:
+	// 1. That copyFileRangeEnabled is being set to true when copy_file_range syscall is available
+	// 2. That isCopyFileRangeSyscallAvailable() works on "new" kernels
+	v, err := kernel.GetKernelVersion()
+	require.NoError(t, err)
+
+	copyWithFileRange := true
+	copyWithFileClone := false
+	doCopyTest(t, &copyWithFileRange, &copyWithFileClone)
+
+	if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 5, Minor: 0}) < 0 {
+		assert.False(t, copyWithFileRange)
+	} else {
+		assert.True(t, copyWithFileRange)
+	}
+
+}
+
+func TestCopy(t *testing.T) {
+	copyWithFileRange := true
+	copyWithFileClone := true
+	doCopyTest(t, &copyWithFileRange, &copyWithFileClone)
+}
+
+func TestCopyWithoutRange(t *testing.T) {
+	copyWithFileRange := false
+	copyWithFileClone := false
+	doCopyTest(t, &copyWithFileRange, &copyWithFileClone)
+}
+
+func doCopyTest(t *testing.T, copyWithFileRange, copyWithFileClone *bool) {
+	dir, err := ioutil.TempDir("", "docker-copy-check")
+	require.NoError(t, err)
+	defer os.RemoveAll(dir)
+	srcFilename := filepath.Join(dir, "srcFilename")
+	dstFilename := filepath.Join(dir, "dstilename")
+
+	r := rand.New(rand.NewSource(0))
+	buf := make([]byte, 1024)
+	_, err = r.Read(buf)
+	require.NoError(t, err)
+	require.NoError(t, ioutil.WriteFile(srcFilename, buf, 0777))
+	fileinfo, err := os.Stat(srcFilename)
+	require.NoError(t, err)
+
+	require.NoError(t, copyRegular(srcFilename, dstFilename, fileinfo, copyWithFileRange, copyWithFileClone))
+	readBuf, err := ioutil.ReadFile(dstFilename)
+	require.NoError(t, err)
+	assert.Equal(t, buf, readBuf)
+}
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
index 5f239a1..db42aad 100644
--- a/daemon/graphdriver/devmapper/deviceset.go
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -21,10 +21,12 @@
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/dockerversion"
 	"github.com/docker/docker/pkg/devicemapper"
+	"github.com/docker/docker/pkg/dmesg"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/loopback"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/parsers"
+	"github.com/docker/docker/pkg/parsers/kernel"
 	units "github.com/docker/go-units"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/pkg/errors"
@@ -533,11 +535,11 @@
 	return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size)
 }
 
-// Return true only if kernel supports xfs and mkfs.xfs is available
-func xfsSupported() bool {
+// xfsSupported checks if xfs is supported, returns nil if it is, otherwise an error
+func xfsSupported() error {
 	// Make sure mkfs.xfs is available
 	if _, err := exec.LookPath("mkfs.xfs"); err != nil {
-		return false
+		return err // error text is descriptive enough
 	}
 
 	// Check if kernel supports xfs filesystem or not.
@@ -545,41 +547,48 @@
 
 	f, err := os.Open("/proc/filesystems")
 	if err != nil {
-		logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err)
-		return false
+		return errors.Wrapf(err, "error checking for xfs support")
 	}
 	defer f.Close()
 
 	s := bufio.NewScanner(f)
 	for s.Scan() {
 		if strings.HasSuffix(s.Text(), "\txfs") {
-			return true
+			return nil
 		}
 	}
 
 	if err := s.Err(); err != nil {
-		logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err)
+		return errors.Wrapf(err, "error checking for xfs support")
 	}
-	return false
+
+	return errors.New(`kernel does not support xfs, or "modprobe xfs" failed`)
 }
 
 func determineDefaultFS() string {
-	if xfsSupported() {
+	err := xfsSupported()
+	if err == nil {
 		return "xfs"
 	}
 
-	logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. Defaulting to ext4 filesystem")
+	logrus.Warnf("devmapper: XFS is not supported in your system (%v). Defaulting to ext4 filesystem", err)
 	return "ext4"
 }
 
+// mkfsOptions tries to figure out whether some additional mkfs options are required
+func mkfsOptions(fs string) []string {
+	if fs == "xfs" && !kernel.CheckKernelVersion(3, 16, 0) {
+		// For kernels earlier than 3.16 (and newer xfsutils),
+		// some xfs features need to be explicitly disabled.
+		return []string{"-m", "crc=0,finobt=0"}
+	}
+
+	return []string{}
+}
+
 func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) {
 	devname := info.DevName()
 
-	args := []string{}
-	args = append(args, devices.mkfsArgs...)
-
-	args = append(args, devname)
-
 	if devices.filesystem == "" {
 		devices.filesystem = determineDefaultFS()
 	}
@@ -587,7 +596,11 @@
 		return err
 	}
 
-	logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name())
+	args := mkfsOptions(devices.filesystem)
+	args = append(args, devices.mkfsArgs...)
+	args = append(args, devname)
+
+	logrus.Infof("devmapper: Creating filesystem %s on device %s, mkfs args: %v", devices.filesystem, info.Name(), args)
 	defer func() {
 		if err != nil {
 			logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err)
@@ -1188,7 +1201,7 @@
 	options = joinMountOptions(options, devices.mountOptions)
 
 	if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil {
-		return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err)
+		return fmt.Errorf("Error mounting '%s' on '%s': %s\n%v", info.DevName(), fsMountPoint, err, string(dmesg.Dmesg(256)))
 	}
 
 	defer unix.Unmount(fsMountPoint, unix.MNT_DETACH)
@@ -1254,14 +1267,13 @@
 }
 
 func setCloseOnExec(name string) {
-	if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil {
-		for _, i := range fileInfos {
-			link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
-			if link == name {
-				fd, err := strconv.Atoi(i.Name())
-				if err == nil {
-					unix.CloseOnExec(fd)
-				}
+	fileInfos, _ := ioutil.ReadDir("/proc/self/fd")
+	for _, i := range fileInfos {
+		link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name()))
+		if link == name {
+			fd, err := strconv.Atoi(i.Name())
+			if err == nil {
+				unix.CloseOnExec(fd)
 			}
 		}
 	}
@@ -2380,7 +2392,7 @@
 	options = joinMountOptions(options, label.FormatMountLabel("", mountLabel))
 
 	if err := mount.Mount(info.DevName(), path, fstype, options); err != nil {
-		return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err)
+		return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s\n%v", info.DevName(), path, err, string(dmesg.Dmesg(256)))
 	}
 
 	if fstype == "xfs" && devices.xfsNospaceRetries != "" {
diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go
index 1efd5f4..bdb2336 100644
--- a/daemon/graphdriver/devmapper/driver.go
+++ b/daemon/graphdriver/devmapper/driver.go
@@ -12,6 +12,7 @@
 	"github.com/sirupsen/logrus"
 
 	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/devicemapper"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/locker"
@@ -69,22 +70,17 @@
 
 	status := [][2]string{
 		{"Pool Name", s.PoolName},
-		{"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))},
-		{"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))},
+		{"Pool Blocksize", units.HumanSize(float64(s.SectorSize))},
+		{"Base Device Size", units.HumanSize(float64(s.BaseDeviceSize))},
 		{"Backing Filesystem", s.BaseDeviceFS},
-		{"Data file", s.DataFile},
-		{"Metadata file", s.MetadataFile},
-		{"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))},
-		{"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))},
-		{"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))},
-		{"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))},
-		{"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))},
-		{"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))},
-		{"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))},
 		{"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)},
-		{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
-		{"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
-		{"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)},
+	}
+
+	if len(s.DataFile) > 0 {
+		status = append(status, [2]string{"Data file", s.DataFile})
+	}
+	if len(s.MetadataFile) > 0 {
+		status = append(status, [2]string{"Metadata file", s.MetadataFile})
 	}
 	if len(s.DataLoopback) > 0 {
 		status = append(status, [2]string{"Data loop file", s.DataLoopback})
@@ -92,6 +88,20 @@
 	if len(s.MetadataLoopback) > 0 {
 		status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback})
 	}
+
+	status = append(status, [][2]string{
+		{"Data Space Used", units.HumanSize(float64(s.Data.Used))},
+		{"Data Space Total", units.HumanSize(float64(s.Data.Total))},
+		{"Data Space Available", units.HumanSize(float64(s.Data.Available))},
+		{"Metadata Space Used", units.HumanSize(float64(s.Metadata.Used))},
+		{"Metadata Space Total", units.HumanSize(float64(s.Metadata.Total))},
+		{"Metadata Space Available", units.HumanSize(float64(s.Metadata.Available))},
+		{"Thin Pool Minimum Free Space", units.HumanSize(float64(s.MinFreeSpace))},
+		{"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)},
+		{"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)},
+		{"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)},
+	}...)
+
 	if vStr, err := devicemapper.GetLibraryVersion(); err == nil {
 		status = append(status, [2]string{"Library Version", vStr})
 	}
@@ -159,51 +169,45 @@
 	if err := d.DeviceSet.DeleteDevice(id, false); err != nil {
 		return fmt.Errorf("failed to remove device %s: %v", id, err)
 	}
-
-	mp := path.Join(d.home, "mnt", id)
-	if err := system.EnsureRemoveAll(mp); err != nil {
-		return err
-	}
-
-	return nil
+	return system.EnsureRemoveAll(path.Join(d.home, "mnt", id))
 }
 
 // Get mounts a device with given id into the root filesystem
-func (d *Driver) Get(id, mountLabel string) (string, error) {
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
 	d.locker.Lock(id)
 	defer d.locker.Unlock(id)
 	mp := path.Join(d.home, "mnt", id)
 	rootFs := path.Join(mp, "rootfs")
 	if count := d.ctr.Increment(mp); count > 1 {
-		return rootFs, nil
+		return containerfs.NewLocalContainerFS(rootFs), nil
 	}
 
 	uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
 	if err != nil {
 		d.ctr.Decrement(mp)
-		return "", err
+		return nil, err
 	}
 
 	// Create the target directories if they don't exist
 	if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) {
 		d.ctr.Decrement(mp)
-		return "", err
+		return nil, err
 	}
 	if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) {
 		d.ctr.Decrement(mp)
-		return "", err
+		return nil, err
 	}
 
 	// Mount the device
 	if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil {
 		d.ctr.Decrement(mp)
-		return "", err
+		return nil, err
 	}
 
 	if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) {
 		d.ctr.Decrement(mp)
 		d.DeviceSet.UnmountDevice(id, mp)
-		return "", err
+		return nil, err
 	}
 
 	idFile := path.Join(mp, "id")
@@ -213,11 +217,11 @@
 		if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil {
 			d.ctr.Decrement(mp)
 			d.DeviceSet.UnmountDevice(id, mp)
-			return "", err
+			return nil, err
 		}
 	}
 
-	return rootFs, nil
+	return containerfs.NewLocalContainerFS(rootFs), nil
 }
 
 // Put unmounts a device and removes it.
diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go
index 94c5209..68f9022 100644
--- a/daemon/graphdriver/driver.go
+++ b/daemon/graphdriver/driver.go
@@ -12,6 +12,7 @@
 	"github.com/vbatts/tar-split/tar/storage"
 
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/plugingetter"
 )
@@ -68,7 +69,7 @@
 	// Get returns the mountpoint for the layered filesystem referred
 	// to by this id. You can optionally specify a mountLabel or "".
 	// Returns the absolute path to the mounted layered filesystem.
-	Get(id, mountLabel string) (dir string, err error)
+	Get(id, mountLabel string) (fs containerfs.ContainerFS, err error)
 	// Put releases the system resources for the specified id,
 	// e.g, unmounting layered filesystem.
 	Put(id string) error
diff --git a/daemon/graphdriver/driver_linux.go b/daemon/graphdriver/driver_linux.go
index 50c8e6a..a2be46b 100644
--- a/daemon/graphdriver/driver_linux.go
+++ b/daemon/graphdriver/driver_linux.go
@@ -67,6 +67,7 @@
 		FsMagicAufs:        "aufs",
 		FsMagicBtrfs:       "btrfs",
 		FsMagicCramfs:      "cramfs",
+		FsMagicEcryptfs:    "ecryptfs",
 		FsMagicExtfs:       "extfs",
 		FsMagicF2fs:        "f2fs",
 		FsMagicGPFS:        "gpfs",
diff --git a/daemon/graphdriver/driver_solaris.go b/daemon/graphdriver/driver_solaris.go
deleted file mode 100644
index 121fd92..0000000
--- a/daemon/graphdriver/driver_solaris.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// +build solaris,cgo
-
-package graphdriver
-
-/*
-#include <sys/statvfs.h>
-#include <stdlib.h>
-
-static inline struct statvfs *getstatfs(char *s) {
-        struct statvfs *buf;
-        int err;
-        buf = (struct statvfs *)malloc(sizeof(struct statvfs));
-        err = statvfs(s, buf);
-        return buf;
-}
-*/
-import "C"
-import (
-	"path/filepath"
-	"unsafe"
-
-	"github.com/docker/docker/pkg/mount"
-	"github.com/sirupsen/logrus"
-)
-
-const (
-	// FsMagicZfs filesystem id for Zfs
-	FsMagicZfs = FsMagic(0x2fc12fc1)
-)
-
-var (
-	// Slice of drivers that should be used in an order
-	priority = []string{
-		"zfs",
-	}
-
-	// FsNames maps filesystem id to name of the filesystem.
-	FsNames = map[FsMagic]string{
-		FsMagicZfs: "zfs",
-	}
-)
-
-// GetFSMagic returns the filesystem id given the path.
-func GetFSMagic(rootpath string) (FsMagic, error) {
-	return 0, nil
-}
-
-type fsChecker struct {
-	t FsMagic
-}
-
-func (c *fsChecker) IsMounted(path string) bool {
-	m, _ := Mounted(c.t, path)
-	return m
-}
-
-// NewFsChecker returns a checker configured for the provided FsMagic
-func NewFsChecker(t FsMagic) Checker {
-	return &fsChecker{
-		t: t,
-	}
-}
-
-// NewDefaultChecker returns a check that parses /proc/mountinfo to check
-// if the specified path is mounted.
-// No-op on Solaris.
-func NewDefaultChecker() Checker {
-	return &defaultChecker{}
-}
-
-type defaultChecker struct {
-}
-
-func (c *defaultChecker) IsMounted(path string) bool {
-	m, _ := mount.Mounted(path)
-	return m
-}
-
-// Mounted checks if the given path is mounted as the fs type
-//Solaris supports only ZFS for now
-func Mounted(fsType FsMagic, mountPath string) (bool, error) {
-
-	cs := C.CString(filepath.Dir(mountPath))
-	buf := C.getstatfs(cs)
-
-	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
-	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
-		(buf.f_basetype[3] != 0) {
-		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath)
-		C.free(unsafe.Pointer(buf))
-		return false, ErrPrerequisites
-	}
-
-	C.free(unsafe.Pointer(buf))
-	C.free(unsafe.Pointer(cs))
-	return true, nil
-}
diff --git a/daemon/graphdriver/driver_unsupported.go b/daemon/graphdriver/driver_unsupported.go
index 4a87560..b3f6857 100644
--- a/daemon/graphdriver/driver_unsupported.go
+++ b/daemon/graphdriver/driver_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows,!freebsd,!solaris
+// +build !linux,!windows,!freebsd
 
 package graphdriver
 
diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go
index e8c7ff2..533c5a7 100644
--- a/daemon/graphdriver/fsdiff.go
+++ b/daemon/graphdriver/fsdiff.go
@@ -18,9 +18,9 @@
 )
 
 // NaiveDiffDriver takes a ProtoDriver and adds the
-// capability of the Diffing methods which it may or may not
-// support on its own. See the comment on the exported
-// NewNaiveDiffDriver function below.
+// capability of the Diffing methods on the local file system,
+// which it may or may not support on its own. See the comment
+// on the exported NewNaiveDiffDriver function below.
 // Notably, the AUFS driver doesn't need to be wrapped like this.
 type NaiveDiffDriver struct {
 	ProtoDriver
@@ -47,10 +47,11 @@
 	startTime := time.Now()
 	driver := gdw.ProtoDriver
 
-	layerFs, err := driver.Get(id, "")
+	layerRootFs, err := driver.Get(id, "")
 	if err != nil {
 		return nil, err
 	}
+	layerFs := layerRootFs.Path()
 
 	defer func() {
 		if err != nil {
@@ -70,12 +71,14 @@
 		}), nil
 	}
 
-	parentFs, err := driver.Get(parent, "")
+	parentRootFs, err := driver.Get(parent, "")
 	if err != nil {
 		return nil, err
 	}
 	defer driver.Put(parent)
 
+	parentFs := parentRootFs.Path()
+
 	changes, err := archive.ChangesDirs(layerFs, parentFs)
 	if err != nil {
 		return nil, err
@@ -94,7 +97,7 @@
 		// are extracted from tar's with full second precision on modified time.
 		// We need this hack here to make sure calls within same second receive
 		// correct result.
-		time.Sleep(startTime.Truncate(time.Second).Add(time.Second).Sub(time.Now()))
+		time.Sleep(time.Until(startTime.Truncate(time.Second).Add(time.Second)))
 		return err
 	}), nil
 }
@@ -104,20 +107,22 @@
 func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) {
 	driver := gdw.ProtoDriver
 
-	layerFs, err := driver.Get(id, "")
+	layerRootFs, err := driver.Get(id, "")
 	if err != nil {
 		return nil, err
 	}
 	defer driver.Put(id)
 
+	layerFs := layerRootFs.Path()
 	parentFs := ""
 
 	if parent != "" {
-		parentFs, err = driver.Get(parent, "")
+		parentRootFs, err := driver.Get(parent, "")
 		if err != nil {
 			return nil, err
 		}
 		defer driver.Put(parent)
+		parentFs = parentRootFs.Path()
 	}
 
 	return archive.ChangesDirs(layerFs, parentFs)
@@ -130,12 +135,13 @@
 	driver := gdw.ProtoDriver
 
 	// Mount the root filesystem so we can apply the diff/layer.
-	layerFs, err := driver.Get(id, "")
+	layerRootFs, err := driver.Get(id, "")
 	if err != nil {
 		return
 	}
 	defer driver.Put(id)
 
+	layerFs := layerRootFs.Path()
 	options := &archive.TarOptions{UIDMaps: gdw.uidMaps,
 		GIDMaps: gdw.gidMaps}
 	start := time.Now().UTC()
@@ -165,5 +171,5 @@
 	}
 	defer driver.Put(id)
 
-	return archive.ChangesSize(layerFs, changes), nil
+	return archive.ChangesSize(layerFs.Path(), changes), nil
 }
diff --git a/daemon/graphdriver/graphtest/graphbench_unix.go b/daemon/graphdriver/graphtest/graphbench_unix.go
index def822b..c04394d 100644
--- a/daemon/graphdriver/graphtest/graphbench_unix.go
+++ b/daemon/graphdriver/graphtest/graphbench_unix.go
@@ -3,13 +3,13 @@
 package graphtest
 
 import (
-	"bytes"
 	"io"
 	"io/ioutil"
-	"path/filepath"
 	"testing"
 
+	contdriver "github.com/containerd/continuity/driver"
 	"github.com/docker/docker/pkg/stringid"
+	"github.com/stretchr/testify/require"
 )
 
 // DriverBenchExists benchmarks calls to exist
@@ -245,15 +245,13 @@
 	for i := 0; i < b.N; i++ {
 
 		// Read content
-		c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt"))
+		c, err := contdriver.ReadFile(root, root.Join(root.Path(), "testfile.txt"))
 		if err != nil {
 			b.Fatal(err)
 		}
 
 		b.StopTimer()
-		if bytes.Compare(c, content) != 0 {
-			b.Fatalf("Wrong content in file %v, expected %v", c, content)
-		}
+		require.Equal(b, content, c)
 		b.StartTimer()
 	}
 }
diff --git a/daemon/graphdriver/graphtest/graphtest_unix.go b/daemon/graphdriver/graphtest/graphtest_unix.go
index 2f8ae54..6b352ba 100644
--- a/daemon/graphdriver/graphtest/graphtest_unix.go
+++ b/daemon/graphdriver/graphtest/graphtest_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris
+// +build linux freebsd
 
 package graphtest
 
@@ -97,10 +97,10 @@
 	dir, err := driver.Get("empty", "")
 	require.NoError(t, err)
 
-	verifyFile(t, dir, 0755|os.ModeDir, 0, 0)
+	verifyFile(t, dir.Path(), 0755|os.ModeDir, 0, 0)
 
 	// Verify that the directory is empty
-	fis, err := readDir(dir)
+	fis, err := readDir(dir, dir.Path())
 	require.NoError(t, err)
 	assert.Len(t, fis, 0)
 
@@ -328,9 +328,9 @@
 	}
 
 	quota := uint64(50 * units.MiB)
-	err = writeRandomFile(path.Join(mountPath, "file"), quota*2)
+
+	err = writeRandomFile(path.Join(mountPath.Path(), "file"), quota*2)
 	if pathError, ok := err.(*os.PathError); ok && pathError.Err != unix.EDQUOT {
 		t.Fatalf("expect write() to fail with %v, got %v", unix.EDQUOT, err)
 	}
-
 }
diff --git a/daemon/graphdriver/graphtest/testutil.go b/daemon/graphdriver/graphtest/testutil.go
index 35bf6d1..9463132 100644
--- a/daemon/graphdriver/graphtest/testutil.go
+++ b/daemon/graphdriver/graphtest/testutil.go
@@ -3,12 +3,11 @@
 import (
 	"bytes"
 	"fmt"
-	"io/ioutil"
 	"math/rand"
 	"os"
-	"path"
 	"sort"
 
+	"github.com/containerd/continuity/driver"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/stringid"
@@ -36,17 +35,17 @@
 	}
 	defer drv.Put(layer)
 
-	if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil {
+	if err := driver.WriteFile(root, root.Join(root.Path(), "file-a"), randomContent(64, seed), 0755); err != nil {
 		return err
 	}
-	if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil {
+	if err := root.MkdirAll(root.Join(root.Path(), "dir-b"), 0755); err != nil {
 		return err
 	}
-	if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil {
+	if err := driver.WriteFile(root, root.Join(root.Path(), "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil {
 		return err
 	}
 
-	return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755)
+	return driver.WriteFile(root, root.Join(root.Path(), "file-c"), randomContent(128*128, seed+2), 0755)
 }
 
 func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error {
@@ -56,12 +55,12 @@
 	}
 	defer drv.Put(layer)
 
-	fileContent, err := ioutil.ReadFile(path.Join(root, filename))
+	fileContent, err := driver.ReadFile(root, root.Join(root.Path(), filename))
 	if err != nil {
 		return err
 	}
 
-	if bytes.Compare(fileContent, content) != 0 {
+	if !bytes.Equal(fileContent, content) {
 		return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content)
 	}
 
@@ -75,7 +74,7 @@
 	}
 	defer drv.Put(layer)
 
-	return ioutil.WriteFile(path.Join(root, filename), content, 0755)
+	return driver.WriteFile(root, root.Join(root.Path(), filename), content, 0755)
 }
 
 func addDirectory(drv graphdriver.Driver, layer, dir string) error {
@@ -85,7 +84,7 @@
 	}
 	defer drv.Put(layer)
 
-	return os.MkdirAll(path.Join(root, dir), 0755)
+	return root.MkdirAll(root.Join(root.Path(), dir), 0755)
 }
 
 func removeAll(drv graphdriver.Driver, layer string, names ...string) error {
@@ -96,7 +95,7 @@
 	defer drv.Put(layer)
 
 	for _, filename := range names {
-		if err := os.RemoveAll(path.Join(root, filename)); err != nil {
+		if err := root.RemoveAll(root.Join(root.Path(), filename)); err != nil {
 			return err
 		}
 	}
@@ -110,8 +109,8 @@
 	}
 	defer drv.Put(layer)
 
-	if _, err := os.Stat(path.Join(root, filename)); err == nil {
-		return fmt.Errorf("file still exists: %s", path.Join(root, filename))
+	if _, err := root.Stat(root.Join(root.Path(), filename)); err == nil {
+		return fmt.Errorf("file still exists: %s", root.Join(root.Path(), filename))
 	} else if !os.IsNotExist(err) {
 		return err
 	}
@@ -127,13 +126,13 @@
 	defer drv.Put(layer)
 
 	for i := 0; i < count; i += 100 {
-		dir := path.Join(root, fmt.Sprintf("directory-%d", i))
-		if err := os.MkdirAll(dir, 0755); err != nil {
+		dir := root.Join(root.Path(), fmt.Sprintf("directory-%d", i))
+		if err := root.MkdirAll(dir, 0755); err != nil {
 			return err
 		}
 		for j := 0; i+j < count && j < 100; j++ {
-			file := path.Join(dir, fmt.Sprintf("file-%d", i+j))
-			if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil {
+			file := root.Join(dir, fmt.Sprintf("file-%d", i+j))
+			if err := driver.WriteFile(root, file, randomContent(64, seed+int64(i+j)), 0755); err != nil {
 				return err
 			}
 		}
@@ -152,7 +151,7 @@
 	changes := []archive.Change{}
 	for i := 0; i < count; i += 100 {
 		archiveRoot := fmt.Sprintf("/directory-%d", i)
-		if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil {
+		if err := root.MkdirAll(root.Join(root.Path(), archiveRoot), 0755); err != nil {
 			return nil, err
 		}
 		for j := 0; i+j < count && j < 100; j++ {
@@ -166,23 +165,23 @@
 			switch j % 3 {
 			// Update file
 			case 0:
-				change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
+				change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
 				change.Kind = archive.ChangeModify
-				if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
+				if err := driver.WriteFile(root, root.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
 					return nil, err
 				}
 			// Add file
 			case 1:
-				change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j))
+				change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j))
 				change.Kind = archive.ChangeAdd
-				if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
+				if err := driver.WriteFile(root, root.Join(root.Path(), change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
 					return nil, err
 				}
 			// Remove file
 			case 2:
-				change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
+				change.Path = root.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
 				change.Kind = archive.ChangeDelete
-				if err := os.Remove(path.Join(root, change.Path)); err != nil {
+				if err := root.Remove(root.Join(root.Path(), change.Path)); err != nil {
 					return nil, err
 				}
 			}
@@ -201,17 +200,17 @@
 	defer drv.Put(layer)
 
 	for i := 0; i < count; i += 100 {
-		dir := path.Join(root, fmt.Sprintf("directory-%d", i))
+		dir := root.Join(root.Path(), fmt.Sprintf("directory-%d", i))
 		for j := 0; i+j < count && j < 100; j++ {
-			file := path.Join(dir, fmt.Sprintf("file-%d", i+j))
-			fileContent, err := ioutil.ReadFile(file)
+			file := root.Join(dir, fmt.Sprintf("file-%d", i+j))
+			fileContent, err := driver.ReadFile(root, file)
 			if err != nil {
 				return err
 			}
 
 			content := randomContent(64, seed+int64(i+j))
 
-			if bytes.Compare(fileContent, content) != 0 {
+			if !bytes.Equal(fileContent, content) {
 				return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content)
 			}
 		}
@@ -254,17 +253,17 @@
 	}
 	defer drv.Put(layer)
 
-	if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil {
+	if err := driver.WriteFile(root, root.Join(root.Path(), "top-id"), []byte(layer), 0755); err != nil {
 		return err
 	}
-	layerDir := path.Join(root, fmt.Sprintf("layer-%d", i))
-	if err := os.MkdirAll(layerDir, 0755); err != nil {
+	layerDir := root.Join(root.Path(), fmt.Sprintf("layer-%d", i))
+	if err := root.MkdirAll(layerDir, 0755); err != nil {
 		return err
 	}
-	if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil {
+	if err := driver.WriteFile(root, root.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil {
 		return err
 	}
-	if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil {
+	if err := driver.WriteFile(root, root.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil {
 		return err
 	}
 
@@ -295,26 +294,26 @@
 	}
 	defer drv.Put(layer)
 
-	layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id"))
+	layerIDBytes, err := driver.ReadFile(root, root.Join(root.Path(), "top-id"))
 	if err != nil {
 		return err
 	}
 
-	if bytes.Compare(layerIDBytes, []byte(layer)) != 0 {
+	if !bytes.Equal(layerIDBytes, []byte(layer)) {
 		return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer))
 	}
 
 	for i := count; i > 0; i-- {
-		layerDir := path.Join(root, fmt.Sprintf("layer-%d", i))
+		layerDir := root.Join(root.Path(), fmt.Sprintf("layer-%d", i))
 
-		thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id"))
+		thisLayerIDBytes, err := driver.ReadFile(root, root.Join(layerDir, "layer-id"))
 		if err != nil {
 			return err
 		}
-		if bytes.Compare(thisLayerIDBytes, layerIDBytes) != 0 {
+		if !bytes.Equal(thisLayerIDBytes, layerIDBytes) {
 			return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes)
 		}
-		layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id"))
+		layerIDBytes, err = driver.ReadFile(root, root.Join(layerDir, "parent-id"))
 		if err != nil {
 			return err
 		}
@@ -322,11 +321,11 @@
 	return nil
 }
 
-// readDir reads a directory just like ioutil.ReadDir()
+// readDir reads a directory just like driver.ReadDir()
 // then hides specific files (currently "lost+found")
 // so the tests don't "see" it
-func readDir(dir string) ([]os.FileInfo, error) {
-	a, err := ioutil.ReadDir(dir)
+func readDir(r driver.Driver, dir string) ([]os.FileInfo, error) {
+	a, err := driver.ReadDir(r, dir)
 	if err != nil {
 		return nil, err
 	}
diff --git a/daemon/graphdriver/graphtest/testutil_unix.go b/daemon/graphdriver/graphtest/testutil_unix.go
index 9647448..b5dec43 100644
--- a/daemon/graphdriver/graphtest/testutil_unix.go
+++ b/daemon/graphdriver/graphtest/testutil_unix.go
@@ -3,12 +3,11 @@
 package graphtest
 
 import (
-	"io/ioutil"
 	"os"
-	"path"
 	"syscall"
 	"testing"
 
+	contdriver "github.com/containerd/continuity/driver"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -40,31 +39,31 @@
 	err := driver.CreateReadWrite(name, "", nil)
 	require.NoError(t, err)
 
-	dir, err := driver.Get(name, "")
+	dirFS, err := driver.Get(name, "")
 	require.NoError(t, err)
 	defer driver.Put(name)
 
-	subdir := path.Join(dir, "a subdir")
-	require.NoError(t, os.Mkdir(subdir, 0705|os.ModeSticky))
-	require.NoError(t, os.Chown(subdir, 1, 2))
+	subdir := dirFS.Join(dirFS.Path(), "a subdir")
+	require.NoError(t, dirFS.Mkdir(subdir, 0705|os.ModeSticky))
+	require.NoError(t, dirFS.Lchown(subdir, 1, 2))
 
-	file := path.Join(dir, "a file")
-	err = ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid)
+	file := dirFS.Join(dirFS.Path(), "a file")
+	err = contdriver.WriteFile(dirFS, file, []byte("Some data"), 0222|os.ModeSetuid)
 	require.NoError(t, err)
 }
 
 func verifyBase(t testing.TB, driver graphdriver.Driver, name string) {
-	dir, err := driver.Get(name, "")
+	dirFS, err := driver.Get(name, "")
 	require.NoError(t, err)
 	defer driver.Put(name)
 
-	subdir := path.Join(dir, "a subdir")
+	subdir := dirFS.Join(dirFS.Path(), "a subdir")
 	verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2)
 
-	file := path.Join(dir, "a file")
+	file := dirFS.Join(dirFS.Path(), "a file")
 	verifyFile(t, file, 0222|os.ModeSetuid, 0, 0)
 
-	files, err := readDir(dir)
+	files, err := readDir(dirFS, dirFS.Path())
 	require.NoError(t, err)
 	assert.Len(t, files, 2)
 }
diff --git a/daemon/graphdriver/lcow/lcow.go b/daemon/graphdriver/lcow/lcow.go
index 86beb3d..c999d5b 100644
--- a/daemon/graphdriver/lcow/lcow.go
+++ b/daemon/graphdriver/lcow/lcow.go
@@ -13,7 +13,7 @@
 // operations. The downside of safe-mode is that operations are slower as
 // a new service utility VM has to be started and torn-down when needed.
 //
-// Options (needs official documentation, but lets get full functionality first...) @jhowardmsft
+// Options:
 //
 // The following options are read by the graphdriver itself:
 //
@@ -23,33 +23,33 @@
 //
 //   * lcow.sandboxsize - Specifies a custom sandbox size in GB for starting a container
 //        -- Possible values:      >= default sandbox size (opengcs defined, currently 20)
-//        -- Default if ommitted:  20
+//        -- Default if omitted:  20
 //
 // The following options are read by opengcs:
 //
 //   * lcow.kirdpath - Specifies a custom path to a kernel/initrd pair
 //        -- Possible values:      Any local path that is not a mapped drive
-//        -- Default if ommitted:  %ProgramFiles%\Linux Containers
+//        -- Default if omitted:  %ProgramFiles%\Linux Containers
 //
 //   * lcow.kernel - Specifies a custom kernel file located in the `lcow.kirdpath` path
 //        -- Possible values:      Any valid filename
-//        -- Default if ommitted:  bootx64.efi
+//        -- Default if omitted:  bootx64.efi
 //
 //   * lcow.initrd - Specifies a custom initrd file located in the `lcow.kirdpath` path
 //        -- Possible values:      Any valid filename
-//        -- Default if ommitted:  initrd.img
+//        -- Default if omitted:  initrd.img
 //
 //   * lcow.bootparameters - Specifies additional boot parameters for booting in kernel+initrd mode
 //        -- Possible values:      Any valid linux kernel boot options
-//        -- Default if ommitted:  <nil>
+//        -- Default if omitted:  <nil>
 //
 //   * lcow.vhdx - Specifies a custom vhdx file to boot (instead of a kernel+initrd)
 //        -- Possible values:      Any valid filename
-//        -- Default if ommitted:  uvm.vhdx under `lcow.kirdpath`
+//        -- Default if omitted:  uvm.vhdx under `lcow.kirdpath`
 //
 //   * lcow.timeout - Specifies a timeout for utility VM operations in seconds
 //        -- Possible values:      >=0
-//        -- Default if ommitted:  300
+//        -- Default if omitted:  300
 
 // TODO: Grab logs from SVM at terminate or errors
 
@@ -65,12 +65,14 @@
 	"strconv"
 	"strings"
 	"sync"
+	"syscall"
 	"time"
 
 	"github.com/Microsoft/hcsshim"
 	"github.com/Microsoft/opengcs/client"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/system"
@@ -106,72 +108,24 @@
 
 	// scratchDirectory is the sub-folder under the driver's data-root used for scratch VHDs in service VMs
 	scratchDirectory = "scratch"
+
+	// errOperationPending is the HRESULT returned by the HCS when the VM termination operation is still pending.
+	errOperationPending syscall.Errno = 0xc0370103
 )
 
-// cacheItem is our internal structure representing an item in our local cache
-// of things that have been mounted.
-type cacheItem struct {
-	sync.Mutex        // Protects operations performed on this item
-	uvmPath    string // Path in utility VM
-	hostPath   string // Path on host
-	refCount   int    // How many times its been mounted
-	isSandbox  bool   // True if a sandbox
-	isMounted  bool   // True when mounted in a service VM
-}
-
-// setIsMounted is a helper function for a cacheItem which does exactly what it says
-func (ci *cacheItem) setIsMounted() {
-	logrus.Debugf("locking cache item for set isMounted")
-	ci.Lock()
-	defer ci.Unlock()
-	ci.isMounted = true
-	logrus.Debugf("set isMounted on cache item")
-}
-
-// incrementRefCount is a helper function for a cacheItem which does exactly what it says
-func (ci *cacheItem) incrementRefCount() {
-	logrus.Debugf("locking cache item for increment")
-	ci.Lock()
-	defer ci.Unlock()
-	ci.refCount++
-	logrus.Debugf("incremented refcount on cache item %+v", ci)
-}
-
-// decrementRefCount is a helper function for a cacheItem which does exactly what it says
-func (ci *cacheItem) decrementRefCount() int {
-	logrus.Debugf("locking cache item for decrement")
-	ci.Lock()
-	defer ci.Unlock()
-	ci.refCount--
-	logrus.Debugf("decremented refcount on cache item %+v", ci)
-	return ci.refCount
-}
-
-// serviceVMItem is our internal structure representing an item in our
-// map of service VMs we are maintaining.
-type serviceVMItem struct {
-	sync.Mutex                     // Serialises operations being performed in this service VM.
-	scratchAttached bool           // Has a scratch been attached?
-	config          *client.Config // Represents the service VM item.
-}
-
 // Driver represents an LCOW graph driver.
 type Driver struct {
-	dataRoot           string                    // Root path on the host where we are storing everything.
-	cachedSandboxFile  string                    // Location of the local default-sized cached sandbox.
-	cachedSandboxMutex sync.Mutex                // Protects race conditions from multiple threads creating the cached sandbox.
-	cachedScratchFile  string                    // Location of the local cached empty scratch space.
-	cachedScratchMutex sync.Mutex                // Protects race conditions from multiple threads creating the cached scratch.
-	options            []string                  // Graphdriver options we are initialised with.
-	serviceVmsMutex    sync.Mutex                // Protects add/updates/delete to the serviceVMs map.
-	serviceVms         map[string]*serviceVMItem // Map of the configs representing the service VM(s) we are running.
-	globalMode         bool                      // Indicates if running in an unsafe/global service VM mode.
+	dataRoot           string     // Root path on the host where we are storing everything.
+	cachedSandboxFile  string     // Location of the local default-sized cached sandbox.
+	cachedSandboxMutex sync.Mutex // Protects race conditions from multiple threads creating the cached sandbox.
+	cachedScratchFile  string     // Location of the local cached empty scratch space.
+	cachedScratchMutex sync.Mutex // Protects race conditions from multiple threads creating the cached scratch.
+	options            []string   // Graphdriver options we are initialised with.
+	globalMode         bool       // Indicates if running in an unsafe/global service VM mode.
 
 	// NOTE: It is OK to use a cache here because Windows does not support
 	// restoring containers when the daemon dies.
-
-	cacheMutex sync.Mutex            // Protects add/update/deletes to cache.
-	cache      map[string]*cacheItem // Map holding a cache of all the IDs we've mounted/unmounted.
+	serviceVms *serviceVMMap // Map of the configs representing the service VM(s) we are running.
 }
 
 // layerDetails is the structure returned by a helper function `getLayerDetails`
@@ -204,9 +158,10 @@
 		options:           options,
 		cachedSandboxFile: filepath.Join(cd, sandboxFilename),
 		cachedScratchFile: filepath.Join(cd, scratchFilename),
-		cache:             make(map[string]*cacheItem),
-		serviceVms:        make(map[string]*serviceVMItem),
-		globalMode:        false,
+		serviceVms: &serviceVMMap{
+			svms: make(map[string]*serviceVMMapItem),
+		},
+		globalMode: false,
 	}
 
 	// Looks for relevant options
@@ -248,53 +203,59 @@
 	return d, nil
 }
 
+func (d *Driver) getVMID(id string) string {
+	if d.globalMode {
+		return svmGlobalID
+	}
+	return id
+}
+
 // startServiceVMIfNotRunning starts a service utility VM if it is not currently running.
 // It can optionally be started with a mapped virtual disk. Returns a opengcs config structure
 // representing the VM.
-func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd *hcsshim.MappedVirtualDisk, context string) (*serviceVMItem, error) {
+func (d *Driver) startServiceVMIfNotRunning(id string, mvdToAdd []hcsshim.MappedVirtualDisk, context string) (_ *serviceVM, err error) {
 	// Use the global ID if in global mode
-	if d.globalMode {
-		id = svmGlobalID
-	}
+	id = d.getVMID(id)
 
 	title := fmt.Sprintf("lcowdriver: startservicevmifnotrunning %s:", id)
 
-	// Make sure thread-safe when interrogating the map
-	logrus.Debugf("%s taking serviceVmsMutex", title)
-	d.serviceVmsMutex.Lock()
+	// Attempt to add ID to the service vm map
+	logrus.Debugf("%s: Adding entry to service vm map", title)
+	svm, exists, err := d.serviceVms.add(id)
+	if err != nil && err == errVMisTerminating {
+		// VM is in the process of terminating. Wait until it's done and then try again
+		logrus.Debugf("%s: VM with current ID still in the process of terminating: %s", title, id)
+		if err := svm.getStopError(); err != nil {
+			logrus.Debugf("%s: VM %s did not stop successfully: %s", title, id, err)
+			return nil, err
+		}
+		return d.startServiceVMIfNotRunning(id, mvdToAdd, context)
+	} else if err != nil {
+		logrus.Debugf("%s: failed to add service vm to map: %s", title, err)
+		return nil, fmt.Errorf("%s: failed to add to service vm map: %s", title, err)
+	}
 
-	// Nothing to do if it's already running except add the mapped drive if supplied.
-	if svm, ok := d.serviceVms[id]; ok {
-		logrus.Debugf("%s exists, releasing serviceVmsMutex", title)
-		d.serviceVmsMutex.Unlock()
-
-		if mvdToAdd != nil {
-			logrus.Debugf("hot-adding %s to %s", mvdToAdd.HostPath, mvdToAdd.ContainerPath)
-
-			// Ensure the item is locked while doing this
-			logrus.Debugf("%s locking serviceVmItem %s", title, svm.config.Name)
-			svm.Lock()
-
-			if err := svm.config.HotAddVhd(mvdToAdd.HostPath, mvdToAdd.ContainerPath, false, true); err != nil {
-				logrus.Debugf("%s releasing serviceVmItem %s on hot-add failure %s", title, svm.config.Name, err)
-				svm.Unlock()
-				return nil, fmt.Errorf("%s hot add %s to %s failed: %s", title, mvdToAdd.HostPath, mvdToAdd.ContainerPath, err)
-			}
-
-			logrus.Debugf("%s releasing serviceVmItem %s", title, svm.config.Name)
-			svm.Unlock()
+	if exists {
+		// Service VM is already up and running. In this case, just hot add the vhds.
+		logrus.Debugf("%s: service vm already exists. Just hot adding: %+v", title, mvdToAdd)
+		if err := svm.hotAddVHDs(mvdToAdd...); err != nil {
+			logrus.Debugf("%s: failed to hot add vhds on service vm creation: %s", title, err)
+			return nil, fmt.Errorf("%s: failed to hot add vhds on service vm: %s", title, err)
 		}
 		return svm, nil
 	}
 
-	// Release the lock early
-	logrus.Debugf("%s releasing serviceVmsMutex", title)
-	d.serviceVmsMutex.Unlock()
+	// We are the first service for this id, so we need to start it
+	logrus.Debugf("%s: service vm doesn't exist. Now starting it up: %s", title, id)
 
-	// So we are starting one. First need an enpty structure.
-	svm := &serviceVMItem{
-		config: &client.Config{},
-	}
+	defer func() {
+		// Signal that start has finished, passing in the error if any.
+		svm.signalStartFinished(err)
+		if err != nil {
+			// We added a ref to the VM, since we failed, we should delete the ref.
+			d.terminateServiceVM(id, "error path on startServiceVMIfNotRunning", false)
+		}
+	}()
 
 	// Generate a default configuration
 	if err := svm.config.GenerateDefault(d.options); err != nil {
@@ -335,12 +296,14 @@
 		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvd)
 		svm.scratchAttached = true
 	}
+
 	logrus.Debugf("%s releasing cachedScratchMutex", title)
 	d.cachedScratchMutex.Unlock()
 
 	// If requested to start it with a mapped virtual disk, add it now.
-	if mvdToAdd != nil {
-		svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, *mvdToAdd)
+	svm.config.MappedVirtualDisks = append(svm.config.MappedVirtualDisks, mvdToAdd...)
+	for _, mvd := range svm.config.MappedVirtualDisks {
+		svm.attachedVHDs[mvd.HostPath] = 1
 	}
 
 	// Start it.
@@ -349,108 +312,80 @@
 		return nil, fmt.Errorf("failed to start service utility VM (%s): %s", context, err)
 	}
 
-	// As it's now running, add it to the map, checking for a race where another
-	// thread has simultaneously tried to start it.
-	logrus.Debugf("%s locking serviceVmsMutex for insertion", title)
-	d.serviceVmsMutex.Lock()
-	if svm, ok := d.serviceVms[id]; ok {
-		logrus.Debugf("%s releasing serviceVmsMutex after insertion but exists", title)
-		d.serviceVmsMutex.Unlock()
-		return svm, nil
-	}
-	d.serviceVms[id] = svm
-	logrus.Debugf("%s releasing serviceVmsMutex after insertion", title)
-	d.serviceVmsMutex.Unlock()
+	// defer function to terminate the VM if the next steps fail
+	defer func() {
+		if err != nil {
+			waitTerminate(svm, fmt.Sprintf("startServiceVmIfNotRunning: %s (%s)", id, context))
+		}
+	}()
 
 	// Now we have a running service VM, we can create the cached scratch file if it doesn't exist.
 	logrus.Debugf("%s locking cachedScratchMutex", title)
 	d.cachedScratchMutex.Lock()
 	if _, err := os.Stat(d.cachedScratchFile); err != nil {
-		logrus.Debugf("%s (%s): creating an SVM scratch - locking serviceVM", title, context)
-		svm.Lock()
+		logrus.Debugf("%s (%s): creating an SVM scratch", title, context)
+
+		// Don't use svm.CreateExt4Vhdx since that only works when the service vm is setup,
+		// but we're still in that process right now.
 		if err := svm.config.CreateExt4Vhdx(scratchTargetFile, client.DefaultVhdxSizeGB, d.cachedScratchFile); err != nil {
-			logrus.Debugf("%s (%s): releasing serviceVM on error path from CreateExt4Vhdx: %s", title, context, err)
-			svm.Unlock()
 			logrus.Debugf("%s (%s): releasing cachedScratchMutex on error path", title, context)
 			d.cachedScratchMutex.Unlock()
-
-			// Do a force terminate and remove it from the map on failure, ignoring any errors
-			if err2 := d.terminateServiceVM(id, "error path from CreateExt4Vhdx", true); err2 != nil {
-				logrus.Warnf("failed to terminate service VM on error path from CreateExt4Vhdx: %s", err2)
-			}
-
+			logrus.Debugf("%s: failed to create vm scratch %s: %s", title, scratchTargetFile, err)
 			return nil, fmt.Errorf("failed to create SVM scratch VHDX (%s): %s", context, err)
 		}
-		logrus.Debugf("%s (%s): releasing serviceVM after %s created and cached to %s", title, context, scratchTargetFile, d.cachedScratchFile)
-		svm.Unlock()
 	}
 	logrus.Debugf("%s (%s): releasing cachedScratchMutex", title, context)
 	d.cachedScratchMutex.Unlock()
 
 	// Hot-add the scratch-space if not already attached
 	if !svm.scratchAttached {
-		logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s - locking serviceVM", context, scratchTargetFile)
-		svm.Lock()
-		if err := svm.config.HotAddVhd(scratchTargetFile, toolsScratchPath, false, true); err != nil {
-			logrus.Debugf("%s (%s): releasing serviceVM on error path of HotAddVhd: %s", title, context, err)
-			svm.Unlock()
-
-			// Do a force terminate and remove it from the map on failure, ignoring any errors
-			if err2 := d.terminateServiceVM(id, "error path from HotAddVhd", true); err2 != nil {
-				logrus.Warnf("failed to terminate service VM on error path from HotAddVhd: %s", err2)
-			}
-
+		logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) hot-adding scratch %s", context, scratchTargetFile)
+		if err := svm.hotAddVHDsAtStart(hcsshim.MappedVirtualDisk{
+			HostPath:          scratchTargetFile,
+			ContainerPath:     toolsScratchPath,
+			CreateInUtilityVM: true,
+		}); err != nil {
+			logrus.Debugf("%s: failed to hot-add scratch %s: %s", title, scratchTargetFile, err)
 			return nil, fmt.Errorf("failed to hot-add %s failed: %s", scratchTargetFile, err)
 		}
-		logrus.Debugf("%s (%s): releasing serviceVM", title, context)
-		svm.Unlock()
+		svm.scratchAttached = true
 	}
 
 	logrus.Debugf("lcowdriver: startServiceVmIfNotRunning: (%s) success", context)
 	return svm, nil
 }
 
-// getServiceVM returns the appropriate service utility VM instance, optionally
-// deleting it from the map (but not the global one)
-func (d *Driver) getServiceVM(id string, deleteFromMap bool) (*serviceVMItem, error) {
-	logrus.Debugf("lcowdriver: getservicevm:locking serviceVmsMutex")
-	d.serviceVmsMutex.Lock()
-	defer func() {
-		logrus.Debugf("lcowdriver: getservicevm:releasing serviceVmsMutex")
-		d.serviceVmsMutex.Unlock()
-	}()
-	if d.globalMode {
-		id = svmGlobalID
-	}
-	if _, ok := d.serviceVms[id]; !ok {
-		return nil, fmt.Errorf("getservicevm for %s failed as not found", id)
-	}
-	svm := d.serviceVms[id]
-	if deleteFromMap && id != svmGlobalID {
-		logrus.Debugf("lcowdriver: getservicevm: removing %s from map", id)
-		delete(d.serviceVms, id)
-	}
-	return svm, nil
-}
-
-// terminateServiceVM terminates a service utility VM if its running, but does nothing
-// when in global mode as it's lifetime is limited to that of the daemon.
-func (d *Driver) terminateServiceVM(id, context string, force bool) error {
-
+// terminateServiceVM terminates a service utility VM if it's running and
+// not being used by any goroutine, but does nothing when in global mode as its
+// lifetime is limited to that of the daemon. If the force flag is set, then
+// the VM will be killed regardless of the ref count or if it's global.
+func (d *Driver) terminateServiceVM(id, context string, force bool) (err error) {
 	// We don't do anything in safe mode unless the force flag has been passed, which
 	// is only the case for cleanup at driver termination.
-	if d.globalMode {
-		if !force {
-			logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context)
-			return nil
-		}
-		id = svmGlobalID
+	if d.globalMode && !force {
+		logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - doing nothing as in global mode", id, context)
+		return nil
 	}
 
-	// Get the service VM and delete it from the map
-	svm, err := d.getServiceVM(id, true)
-	if err != nil {
-		return err
+	id = d.getVMID(id)
+
+	var svm *serviceVM
+	var lastRef bool
+	if !force {
+		// In the not force case, we ref count
+		svm, lastRef, err = d.serviceVms.decrementRefCount(id)
+	} else {
+		// In the force case, we ignore the ref count and just set it to 0
+		svm, err = d.serviceVms.setRefCountZero(id)
+		lastRef = true
+	}
+
+	if err == errVMUnknown {
+		return nil
+	} else if err == errVMisTerminating {
+		return svm.getStopError()
+	} else if !lastRef {
+		return nil
 	}
 
 	// We run the deletion of the scratch as a deferred function to at least attempt
@@ -459,29 +394,67 @@
 		if svm.scratchAttached {
 			scratchTargetFile := filepath.Join(d.dataRoot, scratchDirectory, fmt.Sprintf("%s.vhdx", id))
 			logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - deleting scratch %s", id, context, scratchTargetFile)
-			if err := os.Remove(scratchTargetFile); err != nil {
-				logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, err)
+			if errRemove := os.Remove(scratchTargetFile); errRemove != nil {
+				logrus.Warnf("failed to remove scratch file %s (%s): %s", scratchTargetFile, context, errRemove)
+				err = errRemove
 			}
 		}
+
+		// This function shouldn't actually return error unless there is a bug
+		if errDelete := d.serviceVms.deleteID(id); errDelete != nil {
+			logrus.Warnf("failed to remove service vm from svm map %s (%s): %s", id, context, errDelete)
+		}
+
+		// Signal that this VM has stopped
+		svm.signalStopFinished(err)
 	}()
 
-	// Nothing to do if it's not running
-	if svm.config.Uvm != nil {
-		logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - calling terminate", id, context)
-		if err := svm.config.Uvm.Terminate(); err != nil {
-			return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err)
-		}
+	// Now it's possible that the service VM failed to start and now we are trying to terminate it.
+	// In this case, we will relay the error to the goroutines waiting for this vm to stop.
+	if err := svm.getStartError(); err != nil {
+		logrus.Debugf("lcowdriver: terminateservicevm: %s had failed to start up: %s", id, err)
+		return err
+	}
 
-		logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - waiting for utility VM to terminate", id, context)
-		if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil {
-			return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err)
-		}
+	if err := waitTerminate(svm, fmt.Sprintf("terminateservicevm: %s (%s)", id, context)); err != nil {
+		return err
 	}
 
 	logrus.Debugf("lcowdriver: terminateservicevm: %s (%s) - success", id, context)
 	return nil
 }
 
+func waitTerminate(svm *serviceVM, context string) error {
+	if svm.config == nil {
+		return fmt.Errorf("lcowdriver: waitTerminate: Nil utility VM. %s", context)
+	}
+
+	logrus.Debugf("lcowdriver: waitTerminate: Calling terminate: %s", context)
+	if err := svm.config.Uvm.Terminate(); err != nil {
+		// We might get operation still pending from the HCS. In that case, we shouldn't return
+		// an error since we call wait right after.
+		underlyingError := err
+		if conterr, ok := err.(*hcsshim.ContainerError); ok {
+			underlyingError = conterr.Err
+		}
+
+		if syscallErr, ok := underlyingError.(syscall.Errno); ok {
+			underlyingError = syscallErr
+		}
+
+		if underlyingError != errOperationPending {
+			return fmt.Errorf("failed to terminate utility VM (%s): %s", context, err)
+		}
+		logrus.Debugf("lcowdriver: waitTerminate: uvm.Terminate() returned operation pending (%s)", context)
+	}
+
+	logrus.Debugf("lcowdriver: waitTerminate: (%s) - waiting for utility VM to terminate", context)
+	if err := svm.config.Uvm.WaitTimeout(time.Duration(svm.config.UvmTimeoutSeconds) * time.Second); err != nil {
+		return fmt.Errorf("failed waiting for utility VM to terminate (%s): %s", context, err)
+	}
+	return nil
+}
+
 // String returns the string representation of a driver. This should match
 // the name the graph driver has been registered with.
 func (d *Driver) String() string {
@@ -571,25 +544,18 @@
 		}()
 	}
 
-	// Synchronise the operation in the service VM.
-	logrus.Debugf("%s: locking svm for sandbox creation", title)
-	svm.Lock()
-	defer func() {
-		logrus.Debugf("%s: releasing svm for sandbox creation", title)
-		svm.Unlock()
-	}()
-
 	// Make sure we don't write to our local cached copy if this is for a non-default size request.
 	targetCacheFile := d.cachedSandboxFile
 	if sandboxSize != client.DefaultVhdxSizeGB {
 		targetCacheFile = ""
 	}
 
-	// Actually do the creation.
-	if err := svm.config.CreateExt4Vhdx(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil {
+	// Create the ext4 vhdx
+	logrus.Debugf("%s: creating sandbox ext4 vhdx", title)
+	if err := svm.createExt4VHDX(filepath.Join(d.dir(id), sandboxFilename), uint32(sandboxSize), targetCacheFile); err != nil {
+		logrus.Debugf("%s: failed to create sandbox vhdx for %s: %s", title, id, err)
 		return err
 	}
-
 	return nil
 }
 
@@ -638,6 +604,21 @@
 	layerPath := d.dir(id)
 
 	logrus.Debugf("lcowdriver: remove: id %s: layerPath %s", id, layerPath)
+
+	// Unmount all the layers
+	err := d.Put(id)
+	if err != nil {
+		logrus.Debugf("lcowdriver: remove id %s: failed to unmount: %s", id, err)
+		return err
+	}
+
+	// for non-global case just kill the vm
+	if !d.globalMode {
+		if err := d.terminateServiceVM(id, fmt.Sprintf("Remove %s", id), true); err != nil {
+			return err
+		}
+	}
+
 	if err := os.Rename(layerPath, tmpLayerPath); err != nil && !os.IsNotExist(err) {
 		return err
 	}
@@ -659,43 +640,24 @@
 // For optimisation, we don't actually mount the filesystem (which in our
 // case means [hot-]adding it to a service VM. But we track that and defer
 // the actual adding to the point we need to access it.
-func (d *Driver) Get(id, mountLabel string) (string, error) {
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
 	title := fmt.Sprintf("lcowdriver: get: %s", id)
 	logrus.Debugf(title)
 
-	// Work out what we are working on
-	ld, err := getLayerDetails(d.dir(id))
+	// Generate the mounts needed for the deferred operation.
+	disks, err := d.getAllMounts(id)
 	if err != nil {
-		logrus.Debugf("%s failed to get layer details from %s: %s", title, d.dir(id), err)
-		return "", fmt.Errorf("%s failed to open layer or sandbox VHD to open in %s: %s", title, d.dir(id), err)
+		logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
+		return nil, fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
 	}
-	logrus.Debugf("%s %s, size %d, isSandbox %t", title, ld.filename, ld.size, ld.isSandbox)
 
-	// Add item to cache, or update existing item, but ensure we have the
-	// lock while updating items.
-	logrus.Debugf("%s: locking cacheMutex", title)
-	d.cacheMutex.Lock()
-	var ci *cacheItem
-	if item, ok := d.cache[id]; !ok {
-		// The item is not currently in the cache.
-		ci = &cacheItem{
-			refCount:  1,
-			isSandbox: ld.isSandbox,
-			hostPath:  ld.filename,
-			uvmPath:   fmt.Sprintf("/mnt/%s", id),
-			isMounted: false, // we defer this as an optimisation
-		}
-		d.cache[id] = ci
-		logrus.Debugf("%s: added cache item %+v", title, ci)
-	} else {
-		// Increment the reference counter in the cache.
-		item.incrementRefCount()
-	}
-	logrus.Debugf("%s: releasing cacheMutex", title)
-	d.cacheMutex.Unlock()
-
-	logrus.Debugf("%s %s success. %s: %+v: size %d", title, id, d.dir(id), ci, ld.size)
-	return d.dir(id), nil
+	logrus.Debugf("%s: got layer mounts: %+v", title, disks)
+	return &lcowfs{
+		root:        unionMountName(disks),
+		d:           d,
+		mappedDisks: disks,
+		vmID:        d.getVMID(id),
+	}, nil
 }
 
 // Put does the reverse of get. If there are no more references to
@@ -703,56 +665,45 @@
 func (d *Driver) Put(id string) error {
 	title := fmt.Sprintf("lcowdriver: put: %s", id)
 
-	logrus.Debugf("%s: locking cacheMutex", title)
-	d.cacheMutex.Lock()
-	item, ok := d.cache[id]
-	if !ok {
-		logrus.Debugf("%s: releasing cacheMutex on error path", title)
-		d.cacheMutex.Unlock()
-		return fmt.Errorf("%s possible ref-count error, or invalid id was passed to the graphdriver. Cannot handle id %s as it's not in the cache", title, id)
-	}
-
-	// Decrement the ref-count, and nothing more to do if still in use.
-	if item.decrementRefCount() > 0 {
-		logrus.Debugf("%s: releasing cacheMutex. Cache item is still in use", title)
-		d.cacheMutex.Unlock()
+	// Get the service VM that we need to remove from
+	svm, err := d.serviceVms.get(d.getVMID(id))
+	if err == errVMUnknown {
 		return nil
+	} else if err == errVMisTerminating {
+		return svm.getStopError()
 	}
 
-	// Remove from the cache map.
-	delete(d.cache, id)
-	logrus.Debugf("%s: releasing cacheMutex. Ref count on cache item has dropped to zero, removed from cache", title)
-	d.cacheMutex.Unlock()
+	// Generate the mounts that Get() might have mounted
+	disks, err := d.getAllMounts(id)
+	if err != nil {
+		logrus.Debugf("%s failed to get all layer details for %s: %s", title, d.dir(id), err)
+		return fmt.Errorf("%s failed to get layer details for %s: %s", title, d.dir(id), err)
+	}
 
-	// If we have done a mount and we are in global mode, then remove it. We don't
-	// need to remove in safe mode as the service VM is going to be torn down anyway.
-	if d.globalMode {
-		logrus.Debugf("%s: locking cache item at zero ref-count", title)
-		item.Lock()
-		defer func() {
-			logrus.Debugf("%s: releasing cache item at zero ref-count", title)
-			item.Unlock()
-		}()
-		if item.isMounted {
-			svm, err := d.getServiceVM(id, false)
-			if err != nil {
-				return err
-			}
+	// Now, we want to perform the unmounts, hot-remove and stop the service vm.
+	// We want to go though all the steps even if we have an error to clean up properly
+	err = svm.deleteUnionMount(unionMountName(disks), disks...)
+	if err != nil {
+		logrus.Debugf("%s failed to delete union mount %s: %s", title, id, err)
+	}
 
-			logrus.Debugf("%s: Hot-Removing %s. Locking svm", title, item.hostPath)
-			svm.Lock()
-			if err := svm.config.HotRemoveVhd(item.hostPath); err != nil {
-				logrus.Debugf("%s: releasing svm on error path", title)
-				svm.Unlock()
-				return fmt.Errorf("%s failed to hot-remove %s from global service utility VM: %s", title, item.hostPath, err)
-			}
-			logrus.Debugf("%s: releasing svm", title)
-			svm.Unlock()
+	err1 := svm.hotRemoveVHDs(disks...)
+	if err1 != nil {
+		logrus.Debugf("%s failed to hot remove vhds %s: %s", title, id, err1)
+		if err == nil {
+			err = err1
 		}
 	}
 
-	logrus.Debugf("%s %s: refCount 0. %s (%s) completed successfully", title, id, item.hostPath, item.uvmPath)
-	return nil
+	err1 = d.terminateServiceVM(id, fmt.Sprintf("Put %s", id), false)
+	if err1 != nil {
+		logrus.Debugf("%s failed to terminate service vm %s: %s", title, id, err1)
+		if err == nil {
+			err = err1
+		}
+	}
+	logrus.Debugf("Put succeeded on id %s", id)
+	return err
 }
 
 // Cleanup ensures the information the driver stores is properly removed.
@@ -761,15 +712,6 @@
 func (d *Driver) Cleanup() error {
 	title := "lcowdriver: cleanup"
 
-	d.cacheMutex.Lock()
-	for k, v := range d.cache {
-		logrus.Debugf("%s cache item: %s: %+v", title, k, v)
-		if v.refCount > 0 {
-			logrus.Warnf("%s leaked %s: %+v", title, k, v)
-		}
-	}
-	d.cacheMutex.Unlock()
-
 	items, err := ioutil.ReadDir(d.dataRoot)
 	if err != nil {
 		if os.IsNotExist(err) {
@@ -794,8 +736,8 @@
 
 	// Cleanup any service VMs we have running, along with their scratch spaces.
 	// We don't take the lock for this as it's taken in terminateServiceVm.
-	for k, v := range d.serviceVms {
-		logrus.Debugf("%s svm: %s: %+v", title, k, v)
+	for k, v := range d.serviceVms.svms {
+		logrus.Debugf("%s svm entry: %s: %+v", title, k, v)
 		d.terminateServiceVM(k, "cleanup", true)
 	}
 
@@ -812,65 +754,41 @@
 func (d *Driver) Diff(id, parent string) (io.ReadCloser, error) {
 	title := fmt.Sprintf("lcowdriver: diff: %s", id)
 
-	logrus.Debugf("%s: locking cacheMutex", title)
-	d.cacheMutex.Lock()
-	if _, ok := d.cache[id]; !ok {
-		logrus.Debugf("%s: releasing cacheMutex on error path", title)
-		d.cacheMutex.Unlock()
-		return nil, fmt.Errorf("%s fail as %s is not in the cache", title, id)
-	}
-	ci := d.cache[id]
-	logrus.Debugf("%s: releasing cacheMutex", title)
-	d.cacheMutex.Unlock()
-
-	// Stat to get size
-	logrus.Debugf("%s: locking cacheItem", title)
-	ci.Lock()
-	fileInfo, err := os.Stat(ci.hostPath)
+	// Get VHDX info
+	ld, err := getLayerDetails(d.dir(id))
 	if err != nil {
-		logrus.Debugf("%s: releasing cacheItem on error path", title)
-		ci.Unlock()
-		return nil, fmt.Errorf("%s failed to stat %s: %s", title, ci.hostPath, err)
+		logrus.Debugf("%s: failed to get vhdx information of %s: %s", title, d.dir(id), err)
+		return nil, err
 	}
-	logrus.Debugf("%s: releasing cacheItem", title)
-	ci.Unlock()
 
 	// Start the SVM with a mapped virtual disk. Note that if the SVM is
-	// already runing and we are in global mode, this will be
+	// already running and we are in global mode, this will be
 	// hot-added.
-	mvd := &hcsshim.MappedVirtualDisk{
-		HostPath:          ci.hostPath,
-		ContainerPath:     ci.uvmPath,
+	mvd := hcsshim.MappedVirtualDisk{
+		HostPath:          ld.filename,
+		ContainerPath:     hostToGuest(ld.filename),
 		CreateInUtilityVM: true,
 		ReadOnly:          true,
 	}
 
 	logrus.Debugf("%s: starting service VM", title)
-	svm, err := d.startServiceVMIfNotRunning(id, mvd, fmt.Sprintf("diff %s", id))
+	svm, err := d.startServiceVMIfNotRunning(id, []hcsshim.MappedVirtualDisk{mvd}, fmt.Sprintf("diff %s", id))
 	if err != nil {
 		return nil, err
 	}
 
-	// Set `isMounted` for the cache item. Note that we re-scan the cache
-	// at this point as it's possible the cacheItem changed during the long-
-	// running operation above when we weren't holding the cacheMutex lock.
-	logrus.Debugf("%s: locking cacheMutex for updating isMounted", title)
-	d.cacheMutex.Lock()
-	if _, ok := d.cache[id]; !ok {
-		logrus.Debugf("%s: releasing cacheMutex on error path of isMounted", title)
-		d.cacheMutex.Unlock()
+	logrus.Debugf("lcowdriver: diff: waiting for svm to finish booting")
+	err = svm.getStartError()
+	if err != nil {
 		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
-		return nil, fmt.Errorf("%s fail as %s is not in the cache when updating isMounted", title, id)
+		return nil, fmt.Errorf("lcowdriver: diff: svm failed to boot: %s", err)
 	}
-	ci = d.cache[id]
-	ci.setIsMounted()
-	logrus.Debugf("%s: releasing cacheMutex for updating isMounted", title)
-	d.cacheMutex.Unlock()
 
 	// Obtain the tar stream for it
-	logrus.Debugf("%s %s, size %d, isSandbox %t", title, ci.hostPath, fileInfo.Size(), ci.isSandbox)
-	tarReadCloser, err := svm.config.VhdToTar(ci.hostPath, ci.uvmPath, ci.isSandbox, fileInfo.Size())
+	logrus.Debugf("%s: %s %s, size %d, ReadOnly %t", title, ld.filename, mvd.ContainerPath, ld.size, ld.isSandbox)
+	tarReadCloser, err := svm.config.VhdToTar(mvd.HostPath, mvd.ContainerPath, ld.isSandbox, ld.size)
 	if err != nil {
+		svm.hotRemoveVHDs(mvd)
 		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
 		return nil, fmt.Errorf("%s failed to export layer to tar stream for id: %s, parent: %s : %s", title, id, parent, err)
 	}
@@ -878,14 +796,12 @@
 	logrus.Debugf("%s id %s parent %s completed successfully", title, id, parent)
 
 	// In safe/non-global mode, we can't tear down the service VM until things have been read.
-	if !d.globalMode {
-		return ioutils.NewReadCloserWrapper(tarReadCloser, func() error {
-			tarReadCloser.Close()
-			d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
-			return nil
-		}), nil
-	}
-	return tarReadCloser, nil
+	return ioutils.NewReadCloserWrapper(tarReadCloser, func() error {
+		tarReadCloser.Close()
+		svm.hotRemoveVHDs(mvd)
+		d.terminateServiceVM(id, fmt.Sprintf("diff %s", id), false)
+		return nil
+	}), nil
 }
 
 // ApplyDiff extracts the changeset from the given diff into the
@@ -902,6 +818,12 @@
 	}
 	defer d.terminateServiceVM(id, fmt.Sprintf("applydiff %s", id), false)
 
+	logrus.Debugf("lcowdriver: applydiff: waiting for svm to finish booting")
+	err = svm.getStartError()
+	if err != nil {
+		return 0, fmt.Errorf("lcowdriver: applydiff: svm failed to boot: %s", err)
+	}
+
 	// TODO @jhowardmsft - the retries are temporary to overcome platform reliablity issues.
 	// Obviously this will be removed as platform bugs are fixed.
 	retries := 0
@@ -944,6 +866,11 @@
 	return m, nil
 }
 
+// GetLayerPath gets the layer path on host (path to VHD/VHDX)
+func (d *Driver) GetLayerPath(id string) (string, error) {
+	return d.dir(id), nil
+}
+
 // dir returns the absolute path to the layer.
 func (d *Driver) dir(id string) string {
 	return filepath.Join(d.dataRoot, filepath.Base(id))
@@ -1006,3 +933,34 @@
 
 	return ld, nil
 }
+
+func (d *Driver) getAllMounts(id string) ([]hcsshim.MappedVirtualDisk, error) {
+	layerChain, err := d.getLayerChain(id)
+	if err != nil {
+		return nil, err
+	}
+	layerChain = append([]string{d.dir(id)}, layerChain...)
+
+	logrus.Debugf("getting all layers: %v", layerChain)
+	disks := make([]hcsshim.MappedVirtualDisk, len(layerChain), len(layerChain))
+	for i := range layerChain {
+		ld, err := getLayerDetails(layerChain[i])
+		if err != nil {
+			logrus.Debugf("Failed to get LayerVhdDetails from %s: %s", layerChain[i], err)
+			return nil, err
+		}
+		disks[i].HostPath = ld.filename
+		disks[i].ContainerPath = hostToGuest(ld.filename)
+		disks[i].CreateInUtilityVM = true
+		disks[i].ReadOnly = !ld.isSandbox
+	}
+	return disks, nil
+}
+
+func hostToGuest(hostpath string) string {
+	return fmt.Sprintf("/tmp/%s", filepath.Base(filepath.Dir(hostpath)))
+}
+
+func unionMountName(disks []hcsshim.MappedVirtualDisk) string {
+	return fmt.Sprintf("%s-mount", disks[0].ContainerPath)
+}
diff --git a/daemon/graphdriver/lcow/lcow_svm.go b/daemon/graphdriver/lcow/lcow_svm.go
new file mode 100644
index 0000000..26f6df4
--- /dev/null
+++ b/daemon/graphdriver/lcow/lcow_svm.go
@@ -0,0 +1,373 @@
+// +build windows
+
+package lcow
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/Microsoft/hcsshim"
+	"github.com/Microsoft/opengcs/client"
+	"github.com/sirupsen/logrus"
+)
+
+// Code for all the service VM management for the LCOW graphdriver
+
+var errVMisTerminating = errors.New("service VM is shutting down")
+var errVMUnknown = errors.New("service vm id is unknown")
+var errVMStillHasReference = errors.New("Attemping to delete a VM that is still being used")
+
+// serviceVMMap is the struct representing the id -> service VM mapping.
+type serviceVMMap struct {
+	sync.Mutex
+	svms map[string]*serviceVMMapItem
+}
+
+// serviceVMMapItem is our internal structure representing an item in our
+// map of service VMs we are maintaining.
+type serviceVMMapItem struct {
+	svm      *serviceVM // actual service vm object
+	refCount int        // refcount for VM
+}
+
+type serviceVM struct {
+	sync.Mutex                     // Serialises operations being performed in this service VM.
+	scratchAttached bool           // Has a scratch been attached?
+	config          *client.Config // Represents the service VM item.
+
+	// Indicates that the vm is started
+	startStatus chan interface{}
+	startError  error
+
+	// Indicates that the vm is stopped
+	stopStatus chan interface{}
+	stopError  error
+
+	attachedVHDs map[string]int // Map ref counting all the VHDS we've hot-added/hot-removed.
+	unionMounts  map[string]int // Map ref counting all the union filesystems we mounted.
+}
+
+// add will add an id to the service vm map. There are three cases:
+// 	- entry doesn't exist:
+// 		- add id to map and return a new vm that the caller can manually configure+start
+//	- entry does exist
+//  	- return vm in map and increment ref count
+//  - entry does exist but the ref count is 0
+//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
+func (svmMap *serviceVMMap) add(id string) (svm *serviceVM, alreadyExists bool, err error) {
+	svmMap.Lock()
+	defer svmMap.Unlock()
+	if svm, ok := svmMap.svms[id]; ok {
+		if svm.refCount == 0 {
+			return svm.svm, true, errVMisTerminating
+		}
+		svm.refCount++
+		return svm.svm, true, nil
+	}
+
+	// Doesn't exist, so create an empty svm to put into map and return
+	newSVM := &serviceVM{
+		startStatus:  make(chan interface{}),
+		stopStatus:   make(chan interface{}),
+		attachedVHDs: make(map[string]int),
+		unionMounts:  make(map[string]int),
+		config:       &client.Config{},
+	}
+	svmMap.svms[id] = &serviceVMMapItem{
+		svm:      newSVM,
+		refCount: 1,
+	}
+	return newSVM, false, nil
+}
+
+// get will get the service vm from the map. There are three cases:
+// 	- entry doesn't exist:
+// 		- return errVMUnknown
+//	- entry does exist
+//  	- return vm with no error
+//  - entry does exist but the ref count is 0
+//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
+func (svmMap *serviceVMMap) get(id string) (*serviceVM, error) {
+	svmMap.Lock()
+	defer svmMap.Unlock()
+	svm, ok := svmMap.svms[id]
+	if !ok {
+		return nil, errVMUnknown
+	}
+	if svm.refCount == 0 {
+		return svm.svm, errVMisTerminating
+	}
+	return svm.svm, nil
+}
+
+// decrementRefCount decrements the ref count of the given ID from the map. There are four cases:
+// 	- entry doesn't exist:
+// 		- return errVMUnknown
+//  - entry does exist but the ref count is 0
+//		- return the svm and errVMisTerminating. Caller can call svm.getStopError() to wait for stop
+//	- entry does exist but ref count is 1
+//  	- return vm and set lastRef to true. The caller can then stop the vm, delete the id from this map
+//      - and execute svm.signalStopFinished to signal the threads that the svm has been terminated.
+//	- entry does exist and ref count > 1
+//		- just reduce ref count and return svm
+func (svmMap *serviceVMMap) decrementRefCount(id string) (_ *serviceVM, lastRef bool, _ error) {
+	svmMap.Lock()
+	defer svmMap.Unlock()
+
+	svm, ok := svmMap.svms[id]
+	if !ok {
+		return nil, false, errVMUnknown
+	}
+	if svm.refCount == 0 {
+		return svm.svm, false, errVMisTerminating
+	}
+	svm.refCount--
+	return svm.svm, svm.refCount == 0, nil
+}
+
+// setRefCountZero works the same way as decrementRefCount, but sets ref count to 0 instead of decrementing it.
+func (svmMap *serviceVMMap) setRefCountZero(id string) (*serviceVM, error) {
+	svmMap.Lock()
+	defer svmMap.Unlock()
+
+	svm, ok := svmMap.svms[id]
+	if !ok {
+		return nil, errVMUnknown
+	}
+	if svm.refCount == 0 {
+		return svm.svm, errVMisTerminating
+	}
+	svm.refCount = 0
+	return svm.svm, nil
+}
+
+// deleteID deletes the given ID from the map. If the refcount is not 0 or the
+// VM does not exist, then this function returns an error.
+func (svmMap *serviceVMMap) deleteID(id string) error {
+	svmMap.Lock()
+	defer svmMap.Unlock()
+	svm, ok := svmMap.svms[id]
+	if !ok {
+		return errVMUnknown
+	}
+	if svm.refCount != 0 {
+		return errVMStillHasReference
+	}
+	delete(svmMap.svms, id)
+	return nil
+}
+
+func (svm *serviceVM) signalStartFinished(err error) {
+	svm.Lock()
+	svm.startError = err
+	svm.Unlock()
+	close(svm.startStatus)
+}
+
+func (svm *serviceVM) getStartError() error {
+	<-svm.startStatus
+	svm.Lock()
+	defer svm.Unlock()
+	return svm.startError
+}
+
+func (svm *serviceVM) signalStopFinished(err error) {
+	svm.Lock()
+	svm.stopError = err
+	svm.Unlock()
+	close(svm.stopStatus)
+}
+
+func (svm *serviceVM) getStopError() error {
+	<-svm.stopStatus
+	svm.Lock()
+	defer svm.Unlock()
+	return svm.stopError
+}
+
+// hotAddVHDs waits for the service vm to start and then attaches the vhds.
+func (svm *serviceVM) hotAddVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
+	if err := svm.getStartError(); err != nil {
+		return err
+	}
+	return svm.hotAddVHDsAtStart(mvds...)
+}
+
+// hotAddVHDsAtStart works the same way as hotAddVHDs but does not wait for the VM to start.
+func (svm *serviceVM) hotAddVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error {
+	svm.Lock()
+	defer svm.Unlock()
+	for i, mvd := range mvds {
+		if _, ok := svm.attachedVHDs[mvd.HostPath]; ok {
+			svm.attachedVHDs[mvd.HostPath]++
+			continue
+		}
+
+		if err := svm.config.HotAddVhd(mvd.HostPath, mvd.ContainerPath, mvd.ReadOnly, !mvd.AttachOnly); err != nil {
+			svm.hotRemoveVHDsAtStart(mvds[:i]...)
+			return err
+		}
+		svm.attachedVHDs[mvd.HostPath] = 1
+	}
+	return nil
+}
+
+// hotRemoveVHDs waits for the service vm to start and then removes the vhds.
+func (svm *serviceVM) hotRemoveVHDs(mvds ...hcsshim.MappedVirtualDisk) error {
+	if err := svm.getStartError(); err != nil {
+		return err
+	}
+	return svm.hotRemoveVHDsAtStart(mvds...)
+}
+
+// hotRemoveVHDsAtStart works the same way as hotRemoveVHDs but does not wait for the VM to start.
+func (svm *serviceVM) hotRemoveVHDsAtStart(mvds ...hcsshim.MappedVirtualDisk) error {
+	svm.Lock()
+	defer svm.Unlock()
+	var retErr error
+	for _, mvd := range mvds {
+		if _, ok := svm.attachedVHDs[mvd.HostPath]; !ok {
+			// We continue instead of returning an error if we try to hot remove a non-existent VHD.
+			// This is because one of the callers of the function is graphdriver.Put(). Since graphdriver.Get()
+		// defers the VM start to the first operation, it's possible that nothing has been hot-added
+			// when Put() is called. To avoid Put returning an error in that case, we simply continue if we
+			// don't find the vhd attached.
+			continue
+		}
+
+		if svm.attachedVHDs[mvd.HostPath] > 1 {
+			svm.attachedVHDs[mvd.HostPath]--
+			continue
+		}
+
+		// last VHD, so remove from VM and map
+		if err := svm.config.HotRemoveVhd(mvd.HostPath); err == nil {
+			delete(svm.attachedVHDs, mvd.HostPath)
+		} else {
+			// Take note of the error, but still continue to remove the other VHDs
+			logrus.Warnf("Failed to hot remove %s: %s", mvd.HostPath, err)
+			if retErr == nil {
+				retErr = err
+			}
+		}
+	}
+	return retErr
+}
+
+func (svm *serviceVM) createExt4VHDX(destFile string, sizeGB uint32, cacheFile string) error {
+	if err := svm.getStartError(); err != nil {
+		return err
+	}
+
+	svm.Lock()
+	defer svm.Unlock()
+	return svm.config.CreateExt4Vhdx(destFile, sizeGB, cacheFile)
+}
+
+func (svm *serviceVM) createUnionMount(mountName string, mvds ...hcsshim.MappedVirtualDisk) (err error) {
+	if len(mvds) == 0 {
+		return fmt.Errorf("createUnionMount: error must have at least 1 layer")
+	}
+
+	if err = svm.getStartError(); err != nil {
+		return err
+	}
+
+	svm.Lock()
+	defer svm.Unlock()
+	if _, ok := svm.unionMounts[mountName]; ok {
+		svm.unionMounts[mountName]++
+		return nil
+	}
+
+	var lowerLayers []string
+	if mvds[0].ReadOnly {
+		lowerLayers = append(lowerLayers, mvds[0].ContainerPath)
+	}
+
+	for i := 1; i < len(mvds); i++ {
+		lowerLayers = append(lowerLayers, mvds[i].ContainerPath)
+	}
+
+	logrus.Debugf("Doing the overlay mount with union directory=%s", mountName)
+	if err = svm.runProcess(fmt.Sprintf("mkdir -p %s", mountName), nil, nil, nil); err != nil {
+		return err
+	}
+
+	var cmd string
+	if mvds[0].ReadOnly {
+		// Readonly overlay
+		cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s %s",
+			strings.Join(lowerLayers, ","),
+			mountName)
+	} else {
+		upper := fmt.Sprintf("%s/upper", mvds[0].ContainerPath)
+		work := fmt.Sprintf("%s/work", mvds[0].ContainerPath)
+
+		if err = svm.runProcess(fmt.Sprintf("mkdir -p %s %s", upper, work), nil, nil, nil); err != nil {
+			return err
+		}
+
+		cmd = fmt.Sprintf("mount -t overlay overlay -olowerdir=%s,upperdir=%s,workdir=%s %s",
+			strings.Join(lowerLayers, ":"),
+			upper,
+			work,
+			mountName)
+	}
+
+	logrus.Debugf("createUnionMount: Executing mount=%s", cmd)
+	if err = svm.runProcess(cmd, nil, nil, nil); err != nil {
+		return err
+	}
+
+	svm.unionMounts[mountName] = 1
+	return nil
+}
+
+func (svm *serviceVM) deleteUnionMount(mountName string, disks ...hcsshim.MappedVirtualDisk) error {
+	if err := svm.getStartError(); err != nil {
+		return err
+	}
+
+	svm.Lock()
+	defer svm.Unlock()
+	if _, ok := svm.unionMounts[mountName]; !ok {
+		return nil
+	}
+
+	if svm.unionMounts[mountName] > 1 {
+		svm.unionMounts[mountName]--
+		return nil
+	}
+
+	logrus.Debugf("Removing union mount %s", mountName)
+	if err := svm.runProcess(fmt.Sprintf("umount %s", mountName), nil, nil, nil); err != nil {
+		return err
+	}
+
+	delete(svm.unionMounts, mountName)
+	return nil
+}
+
+func (svm *serviceVM) runProcess(command string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error {
+	process, err := svm.config.RunProcess(command, stdin, stdout, stderr)
+	if err != nil {
+		return err
+	}
+	defer process.Close()
+
+	process.WaitTimeout(time.Duration(int(time.Second) * svm.config.UvmTimeoutSeconds))
+	exitCode, err := process.ExitCode()
+	if err != nil {
+		return err
+	}
+
+	if exitCode != 0 {
+		return fmt.Errorf("svm.runProcess: command %s failed with exit code %d", command, exitCode)
+	}
+	return nil
+}
diff --git a/daemon/graphdriver/lcow/remotefs.go b/daemon/graphdriver/lcow/remotefs.go
new file mode 100644
index 0000000..148e3c0
--- /dev/null
+++ b/daemon/graphdriver/lcow/remotefs.go
@@ -0,0 +1,139 @@
+// +build windows
+
+package lcow
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/Microsoft/hcsshim"
+	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
+	"github.com/sirupsen/logrus"
+)
+
+type lcowfs struct {
+	root        string
+	d           *Driver
+	mappedDisks []hcsshim.MappedVirtualDisk
+	vmID        string
+	currentSVM  *serviceVM
+	sync.Mutex
+}
+
+var _ containerfs.ContainerFS = &lcowfs{}
+
+// ErrNotSupported is an error for unsupported operations in the remotefs
+var ErrNotSupported = fmt.Errorf("not supported")
+
+// Functions to implement the ContainerFS interface
+func (l *lcowfs) Path() string {
+	return l.root
+}
+
+func (l *lcowfs) ResolveScopedPath(path string, rawPath bool) (string, error) {
+	logrus.Debugf("remotefs.resolvescopedpath inputs: %s %s ", path, l.root)
+
+	arg1 := l.Join(l.root, path)
+	if !rawPath {
+		// The l.Join("/", path) will make path an absolute path and then clean it
+		// so if path = ../../X, it will become /X.
+		arg1 = l.Join(l.root, l.Join("/", path))
+	}
+	arg2 := l.root
+
+	output := &bytes.Buffer{}
+	if err := l.runRemoteFSProcess(nil, output, remotefs.ResolvePathCmd, arg1, arg2); err != nil {
+		return "", err
+	}
+
+	logrus.Debugf("remotefs.resolvescopedpath success. Output: %s\n", output.String())
+	return output.String(), nil
+}
+
+func (l *lcowfs) OS() string {
+	return "linux"
+}
+
+func (l *lcowfs) Architecture() string {
+	return runtime.GOARCH
+}
+
+// Other functions that are used by docker like the daemon Archiver/Extractor
+func (l *lcowfs) ExtractArchive(src io.Reader, dst string, opts *archive.TarOptions) error {
+	logrus.Debugf("remotefs.ExtractArchve inputs: %s %+v", dst, opts)
+
+	tarBuf := &bytes.Buffer{}
+	if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil {
+		return fmt.Errorf("failed to marshall tar opts: %s", err)
+	}
+
+	input := io.MultiReader(tarBuf, src)
+	if err := l.runRemoteFSProcess(input, nil, remotefs.ExtractArchiveCmd, dst); err != nil {
+		return fmt.Errorf("failed to extract archive to %s: %s", dst, err)
+	}
+	return nil
+}
+
+func (l *lcowfs) ArchivePath(src string, opts *archive.TarOptions) (io.ReadCloser, error) {
+	logrus.Debugf("remotefs.ArchivePath: %s %+v", src, opts)
+
+	tarBuf := &bytes.Buffer{}
+	if err := remotefs.WriteTarOptions(tarBuf, opts); err != nil {
+		return nil, fmt.Errorf("failed to marshall tar opts: %s", err)
+	}
+
+	r, w := io.Pipe()
+	go func() {
+		defer w.Close()
+		if err := l.runRemoteFSProcess(tarBuf, w, remotefs.ArchivePathCmd, src); err != nil {
+			logrus.Debugf("REMOTEFS: Failed to extract archive: %s %+v %s", src, opts, err)
+		}
+	}()
+	return r, nil
+}
+
+// Helper functions
+func (l *lcowfs) startVM() error {
+	l.Lock()
+	defer l.Unlock()
+	if l.currentSVM != nil {
+		return nil
+	}
+
+	svm, err := l.d.startServiceVMIfNotRunning(l.vmID, l.mappedDisks, fmt.Sprintf("lcowfs.startVM"))
+	if err != nil {
+		return err
+	}
+
+	if err = svm.createUnionMount(l.root, l.mappedDisks...); err != nil {
+		return err
+	}
+	l.currentSVM = svm
+	return nil
+}
+
+func (l *lcowfs) runRemoteFSProcess(stdin io.Reader, stdout io.Writer, args ...string) error {
+	if err := l.startVM(); err != nil {
+		return err
+	}
+
+	// Append remotefs prefix and setup as a command line string
+	cmd := fmt.Sprintf("%s %s", remotefs.RemotefsCmd, strings.Join(args, " "))
+	stderr := &bytes.Buffer{}
+	if err := l.currentSVM.runProcess(cmd, stdin, stdout, stderr); err != nil {
+		return err
+	}
+
+	eerr, err := remotefs.ReadError(stderr)
+	if eerr != nil {
+		// Process returned an error so return that.
+		return remotefs.ExportedToError(eerr)
+	}
+	return err
+}
diff --git a/daemon/graphdriver/lcow/remotefs_file.go b/daemon/graphdriver/lcow/remotefs_file.go
new file mode 100644
index 0000000..c134319
--- /dev/null
+++ b/daemon/graphdriver/lcow/remotefs_file.go
@@ -0,0 +1,211 @@
+// +build windows
+
+package lcow
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+
+	"github.com/Microsoft/hcsshim"
+	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
+	"github.com/containerd/continuity/driver"
+)
+
+type lcowfile struct {
+	process   hcsshim.Process
+	stdin     io.WriteCloser
+	stdout    io.ReadCloser
+	stderr    io.ReadCloser
+	fs        *lcowfs
+	guestPath string
+}
+
+func (l *lcowfs) Open(path string) (driver.File, error) {
+	return l.OpenFile(path, os.O_RDONLY, 0)
+}
+
+func (l *lcowfs) OpenFile(path string, flag int, perm os.FileMode) (_ driver.File, err error) {
+	flagStr := strconv.FormatInt(int64(flag), 10)
+	permStr := strconv.FormatUint(uint64(perm), 8)
+
+	commandLine := fmt.Sprintf("%s %s %s %s", remotefs.RemotefsCmd, remotefs.OpenFileCmd, flagStr, permStr)
+	env := make(map[string]string)
+	env["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:"
+	processConfig := &hcsshim.ProcessConfig{
+		EmulateConsole:    false,
+		CreateStdInPipe:   true,
+		CreateStdOutPipe:  true,
+		CreateStdErrPipe:  true,
+		CreateInUtilityVm: true,
+		WorkingDirectory:  "/bin",
+		Environment:       env,
+		CommandLine:       commandLine,
+	}
+
+	process, err := l.currentSVM.config.Uvm.CreateProcess(processConfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to open file %s: %s", path, err)
+	}
+
+	stdin, stdout, stderr, err := process.Stdio()
+	if err != nil {
+		process.Kill()
+		process.Close()
+		return nil, fmt.Errorf("failed to open file pipes %s: %s", path, err)
+	}
+
+	lf := &lcowfile{
+		process:   process,
+		stdin:     stdin,
+		stdout:    stdout,
+		stderr:    stderr,
+		fs:        l,
+		guestPath: path,
+	}
+
+	if _, err := lf.getResponse(); err != nil {
+		return nil, fmt.Errorf("failed to open file %s: %s", path, err)
+	}
+	return lf, nil
+}
+
+func (l *lcowfile) Read(b []byte) (int, error) {
+	hdr := &remotefs.FileHeader{
+		Cmd:  remotefs.Read,
+		Size: uint64(len(b)),
+	}
+
+	if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil {
+		return 0, err
+	}
+
+	buf, err := l.getResponse()
+	if err != nil {
+		return 0, nil
+	}
+
+	n := copy(b, buf)
+	return n, nil
+}
+
+func (l *lcowfile) Write(b []byte) (int, error) {
+	hdr := &remotefs.FileHeader{
+		Cmd:  remotefs.Write,
+		Size: uint64(len(b)),
+	}
+
+	if err := remotefs.WriteFileHeader(l.stdin, hdr, b); err != nil {
+		return 0, err
+	}
+
+	_, err := l.getResponse()
+	if err != nil {
+		return 0, nil
+	}
+
+	return len(b), nil
+}
+
+func (l *lcowfile) Seek(offset int64, whence int) (int64, error) {
+	seekHdr := &remotefs.SeekHeader{
+		Offset: offset,
+		Whence: int32(whence),
+	}
+
+	buf := &bytes.Buffer{}
+	if err := binary.Write(buf, binary.BigEndian, seekHdr); err != nil {
+		return 0, err
+	}
+
+	hdr := &remotefs.FileHeader{
+		Cmd:  remotefs.Write,
+		Size: uint64(buf.Len()),
+	}
+	if err := remotefs.WriteFileHeader(l.stdin, hdr, buf.Bytes()); err != nil {
+		return 0, err
+	}
+
+	resBuf, err := l.getResponse()
+	if err != nil {
+		return 0, err
+	}
+
+	var res int64
+	if err := binary.Read(bytes.NewBuffer(resBuf), binary.BigEndian, &res); err != nil {
+		return 0, err
+	}
+	return res, nil
+}
+
+func (l *lcowfile) Close() error {
+	hdr := &remotefs.FileHeader{
+		Cmd:  remotefs.Close,
+		Size: 0,
+	}
+
+	if err := remotefs.WriteFileHeader(l.stdin, hdr, nil); err != nil {
+		return err
+	}
+
+	_, err := l.getResponse()
+	return err
+}
+
+func (l *lcowfile) Readdir(n int) ([]os.FileInfo, error) {
+	nStr := strconv.FormatInt(int64(n), 10)
+
+	// Unlike the other File functions, this one can just be run without maintaining state,
+	// so just do the normal runRemoteFSProcess way.
+	buf := &bytes.Buffer{}
+	if err := l.fs.runRemoteFSProcess(nil, buf, remotefs.ReadDirCmd, l.guestPath, nStr); err != nil {
+		return nil, err
+	}
+
+	var info []remotefs.FileInfo
+	if err := json.Unmarshal(buf.Bytes(), &info); err != nil {
+		return nil, nil
+	}
+
+	osInfo := make([]os.FileInfo, len(info))
+	for i := range info {
+		osInfo[i] = &info[i]
+	}
+	return osInfo, nil
+}
+
+func (l *lcowfile) getResponse() ([]byte, error) {
+	hdr, err := remotefs.ReadFileHeader(l.stdout)
+	if err != nil {
+		return nil, err
+	}
+
+	if hdr.Cmd != remotefs.CmdOK {
+		// Something went wrong during the openfile in the server.
+		// Parse stderr and return that as an error
+		eerr, err := remotefs.ReadError(l.stderr)
+		if eerr != nil {
+			return nil, remotefs.ExportedToError(eerr)
+		}
+
+		// Maybe the parsing went wrong?
+		if err != nil {
+			return nil, err
+		}
+
+		// At this point, we know something went wrong in the remotefs program, but
+		// we don't know why.
+		return nil, fmt.Errorf("unknown error")
+	}
+
+	// Successful command, we might have some data to read (for Read + Seek)
+	buf := make([]byte, hdr.Size, hdr.Size)
+	if _, err := io.ReadFull(l.stdout, buf); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
diff --git a/daemon/graphdriver/lcow/remotefs_filedriver.go b/daemon/graphdriver/lcow/remotefs_filedriver.go
new file mode 100644
index 0000000..a3e0d9e
--- /dev/null
+++ b/daemon/graphdriver/lcow/remotefs_filedriver.go
@@ -0,0 +1,123 @@
+// +build windows
+
+package lcow
+
+import (
+	"bytes"
+	"encoding/json"
+	"os"
+	"strconv"
+
+	"github.com/Microsoft/opengcs/service/gcsutils/remotefs"
+
+	"github.com/containerd/continuity/driver"
+	"github.com/sirupsen/logrus"
+)
+
+var _ driver.Driver = &lcowfs{}
+
+func (l *lcowfs) Readlink(p string) (string, error) {
+	logrus.Debugf("removefs.readlink args: %s", p)
+
+	result := &bytes.Buffer{}
+	if err := l.runRemoteFSProcess(nil, result, remotefs.ReadlinkCmd, p); err != nil {
+		return "", err
+	}
+	return result.String(), nil
+}
+
+func (l *lcowfs) Mkdir(path string, mode os.FileMode) error {
+	return l.mkdir(path, mode, remotefs.MkdirCmd)
+}
+
+func (l *lcowfs) MkdirAll(path string, mode os.FileMode) error {
+	return l.mkdir(path, mode, remotefs.MkdirAllCmd)
+}
+
+func (l *lcowfs) mkdir(path string, mode os.FileMode, cmd string) error {
+	modeStr := strconv.FormatUint(uint64(mode), 8)
+	logrus.Debugf("remotefs.%s args: %s %s", cmd, path, modeStr)
+	return l.runRemoteFSProcess(nil, nil, cmd, path, modeStr)
+}
+
+func (l *lcowfs) Remove(path string) error {
+	return l.remove(path, remotefs.RemoveCmd)
+}
+
+func (l *lcowfs) RemoveAll(path string) error {
+	return l.remove(path, remotefs.RemoveAllCmd)
+}
+
+func (l *lcowfs) remove(path string, cmd string) error {
+	logrus.Debugf("remotefs.%s args: %s", cmd, path)
+	return l.runRemoteFSProcess(nil, nil, cmd, path)
+}
+
+func (l *lcowfs) Link(oldname, newname string) error {
+	return l.link(oldname, newname, remotefs.LinkCmd)
+}
+
+func (l *lcowfs) Symlink(oldname, newname string) error {
+	return l.link(oldname, newname, remotefs.SymlinkCmd)
+}
+
+func (l *lcowfs) link(oldname, newname, cmd string) error {
+	logrus.Debugf("remotefs.%s args: %s %s", cmd, oldname, newname)
+	return l.runRemoteFSProcess(nil, nil, cmd, oldname, newname)
+}
+
+func (l *lcowfs) Lchown(name string, uid, gid int64) error {
+	uidStr := strconv.FormatInt(uid, 10)
+	gidStr := strconv.FormatInt(gid, 10)
+
+	logrus.Debugf("remotefs.lchown args: %s %s %s", name, uidStr, gidStr)
+	return l.runRemoteFSProcess(nil, nil, remotefs.LchownCmd, name, uidStr, gidStr)
+}
+
+// Lchmod changes the mode of a file not following symlinks.
+func (l *lcowfs) Lchmod(path string, mode os.FileMode) error {
+	modeStr := strconv.FormatUint(uint64(mode), 8)
+	logrus.Debugf("remotefs.lchmod args: %s %s", path, modeStr)
+	return l.runRemoteFSProcess(nil, nil, remotefs.LchmodCmd, path, modeStr)
+}
+
+func (l *lcowfs) Mknod(path string, mode os.FileMode, major, minor int) error {
+	modeStr := strconv.FormatUint(uint64(mode), 8)
+	majorStr := strconv.FormatUint(uint64(major), 10)
+	minorStr := strconv.FormatUint(uint64(minor), 10)
+
+	logrus.Debugf("remotefs.mknod args: %s %s %s %s", path, modeStr, majorStr, minorStr)
+	return l.runRemoteFSProcess(nil, nil, remotefs.MknodCmd, path, modeStr, majorStr, minorStr)
+}
+
+func (l *lcowfs) Mkfifo(path string, mode os.FileMode) error {
+	modeStr := strconv.FormatUint(uint64(mode), 8)
+	logrus.Debugf("remotefs.mkfifo args: %s %s", path, modeStr)
+	return l.runRemoteFSProcess(nil, nil, remotefs.MkfifoCmd, path, modeStr)
+}
+
+func (l *lcowfs) Stat(p string) (os.FileInfo, error) {
+	return l.stat(p, remotefs.StatCmd)
+}
+
+func (l *lcowfs) Lstat(p string) (os.FileInfo, error) {
+	return l.stat(p, remotefs.LstatCmd)
+}
+
+func (l *lcowfs) stat(path string, cmd string) (os.FileInfo, error) {
+	logrus.Debugf("remotefs.stat inputs: %s %s", cmd, path)
+
+	output := &bytes.Buffer{}
+	err := l.runRemoteFSProcess(nil, output, cmd, path)
+	if err != nil {
+		return nil, err
+	}
+
+	var fi remotefs.FileInfo
+	if err := json.Unmarshal(output.Bytes(), &fi); err != nil {
+		return nil, err
+	}
+
+	logrus.Debugf("remotefs.stat success. got: %v\n", fi)
+	return &fi, nil
+}
diff --git a/daemon/graphdriver/lcow/remotefs_pathdriver.go b/daemon/graphdriver/lcow/remotefs_pathdriver.go
new file mode 100644
index 0000000..95d3e71
--- /dev/null
+++ b/daemon/graphdriver/lcow/remotefs_pathdriver.go
@@ -0,0 +1,212 @@
+// +build windows
+
+package lcow
+
+import (
+	"errors"
+	"os"
+	pathpkg "path"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/containerd/continuity/pathdriver"
+)
+
+var _ pathdriver.PathDriver = &lcowfs{}
+
+// Continuity Path functions can be done locally
+func (l *lcowfs) Join(path ...string) string {
+	return pathpkg.Join(path...)
+}
+
+func (l *lcowfs) IsAbs(path string) bool {
+	return pathpkg.IsAbs(path)
+}
+
+func sameWord(a, b string) bool {
+	return a == b
+}
+
+// Implementation taken from the Go standard library
+func (l *lcowfs) Rel(basepath, targpath string) (string, error) {
+	baseVol := ""
+	targVol := ""
+	base := l.Clean(basepath)
+	targ := l.Clean(targpath)
+	if sameWord(targ, base) {
+		return ".", nil
+	}
+	base = base[len(baseVol):]
+	targ = targ[len(targVol):]
+	if base == "." {
+		base = ""
+	}
+	// Can't use IsAbs - `\a` and `a` are both relative in Windows.
+	baseSlashed := len(base) > 0 && base[0] == l.Separator()
+	targSlashed := len(targ) > 0 && targ[0] == l.Separator()
+	if baseSlashed != targSlashed || !sameWord(baseVol, targVol) {
+		return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
+	}
+	// Position base[b0:bi] and targ[t0:ti] at the first differing elements.
+	bl := len(base)
+	tl := len(targ)
+	var b0, bi, t0, ti int
+	for {
+		for bi < bl && base[bi] != l.Separator() {
+			bi++
+		}
+		for ti < tl && targ[ti] != l.Separator() {
+			ti++
+		}
+		if !sameWord(targ[t0:ti], base[b0:bi]) {
+			break
+		}
+		if bi < bl {
+			bi++
+		}
+		if ti < tl {
+			ti++
+		}
+		b0 = bi
+		t0 = ti
+	}
+	if base[b0:bi] == ".." {
+		return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
+	}
+	if b0 != bl {
+		// Base elements left. Must go up before going down.
+		seps := strings.Count(base[b0:bl], string(l.Separator()))
+		size := 2 + seps*3
+		if tl != t0 {
+			size += 1 + tl - t0
+		}
+		buf := make([]byte, size)
+		n := copy(buf, "..")
+		for i := 0; i < seps; i++ {
+			buf[n] = l.Separator()
+			copy(buf[n+1:], "..")
+			n += 3
+		}
+		if t0 != tl {
+			buf[n] = l.Separator()
+			copy(buf[n+1:], targ[t0:])
+		}
+		return string(buf), nil
+	}
+	return targ[t0:], nil
+}
+
+func (l *lcowfs) Base(path string) string {
+	return pathpkg.Base(path)
+}
+
+func (l *lcowfs) Dir(path string) string {
+	return pathpkg.Dir(path)
+}
+
+func (l *lcowfs) Clean(path string) string {
+	return pathpkg.Clean(path)
+}
+
+func (l *lcowfs) Split(path string) (dir, file string) {
+	return pathpkg.Split(path)
+}
+
+func (l *lcowfs) Separator() byte {
+	return '/'
+}
+
+func (l *lcowfs) Abs(path string) (string, error) {
+	// Abs is supposed to add the current working directory, which is meaningless in lcow.
+	// So, return an error.
+	return "", ErrNotSupported
+}
+
+// Implementation taken from the Go standard library
+func (l *lcowfs) Walk(root string, walkFn filepath.WalkFunc) error {
+	info, err := l.Lstat(root)
+	if err != nil {
+		err = walkFn(root, nil, err)
+	} else {
+		err = l.walk(root, info, walkFn)
+	}
+	if err == filepath.SkipDir {
+		return nil
+	}
+	return err
+}
+
+// walk recursively descends path, calling w.
+func (l *lcowfs) walk(path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
+	err := walkFn(path, info, nil)
+	if err != nil {
+		if info.IsDir() && err == filepath.SkipDir {
+			return nil
+		}
+		return err
+	}
+
+	if !info.IsDir() {
+		return nil
+	}
+
+	names, err := l.readDirNames(path)
+	if err != nil {
+		return walkFn(path, info, err)
+	}
+
+	for _, name := range names {
+		filename := l.Join(path, name)
+		fileInfo, err := l.Lstat(filename)
+		if err != nil {
+			if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
+				return err
+			}
+		} else {
+			err = l.walk(filename, fileInfo, walkFn)
+			if err != nil {
+				if !fileInfo.IsDir() || err != filepath.SkipDir {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+// readDirNames reads the directory named by dirname and returns
+// a sorted list of directory entries.
+func (l *lcowfs) readDirNames(dirname string) ([]string, error) {
+	f, err := l.Open(dirname)
+	if err != nil {
+		return nil, err
+	}
+	files, err := f.Readdir(-1)
+	f.Close()
+	if err != nil {
+		return nil, err
+	}
+
+	names := make([]string, len(files), len(files))
+	for i := range files {
+		names[i] = files[i].Name()
+	}
+
+	sort.Strings(names)
+	return names, nil
+}
+
+// Note that Go's filepath.FromSlash/ToSlash convert between OS paths and '/'. Since the path separator
+// for LCOW (and Unix) is '/', they are no-ops.
+func (l *lcowfs) FromSlash(path string) string {
+	return path
+}
+
+func (l *lcowfs) ToSlash(path string) string {
+	return path
+}
+
+func (l *lcowfs) Match(pattern, name string) (matched bool, err error) {
+	return pathpkg.Match(pattern, name)
+}
diff --git a/daemon/graphdriver/overlay/copy.go b/daemon/graphdriver/overlay/copy.go
deleted file mode 100644
index 8c35b91..0000000
--- a/daemon/graphdriver/overlay/copy.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// +build linux
-
-package overlay
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"syscall"
-	"time"
-
-	"github.com/docker/docker/pkg/pools"
-	"github.com/docker/docker/pkg/system"
-	rsystem "github.com/opencontainers/runc/libcontainer/system"
-	"golang.org/x/sys/unix"
-)
-
-type copyFlags int
-
-const (
-	copyHardlink copyFlags = 1 << iota
-)
-
-func copyRegular(srcPath, dstPath string, mode os.FileMode) error {
-	srcFile, err := os.Open(srcPath)
-	if err != nil {
-		return err
-	}
-	defer srcFile.Close()
-
-	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode)
-	if err != nil {
-		return err
-	}
-	defer dstFile.Close()
-
-	_, err = pools.Copy(dstFile, srcFile)
-
-	return err
-}
-
-func copyXattr(srcPath, dstPath, attr string) error {
-	data, err := system.Lgetxattr(srcPath, attr)
-	if err != nil {
-		return err
-	}
-	if data != nil {
-		if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func copyDir(srcDir, dstDir string, flags copyFlags) error {
-	err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-
-		// Rebase path
-		relPath, err := filepath.Rel(srcDir, srcPath)
-		if err != nil {
-			return err
-		}
-
-		dstPath := filepath.Join(dstDir, relPath)
-		if err != nil {
-			return err
-		}
-
-		stat, ok := f.Sys().(*syscall.Stat_t)
-		if !ok {
-			return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
-		}
-
-		isHardlink := false
-
-		switch f.Mode() & os.ModeType {
-		case 0: // Regular file
-			if flags&copyHardlink != 0 {
-				isHardlink = true
-				if err := os.Link(srcPath, dstPath); err != nil {
-					return err
-				}
-			} else {
-				if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil {
-					return err
-				}
-			}
-
-		case os.ModeDir:
-			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
-				return err
-			}
-
-		case os.ModeSymlink:
-			link, err := os.Readlink(srcPath)
-			if err != nil {
-				return err
-			}
-
-			if err := os.Symlink(link, dstPath); err != nil {
-				return err
-			}
-
-		case os.ModeNamedPipe:
-			fallthrough
-		case os.ModeSocket:
-			if rsystem.RunningInUserNS() {
-				// cannot create a device if running in user namespace
-				return nil
-			}
-			if err := unix.Mkfifo(dstPath, stat.Mode); err != nil {
-				return err
-			}
-
-		case os.ModeDevice:
-			if err := unix.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
-				return err
-			}
-
-		default:
-			return fmt.Errorf("unknown file type for %s", srcPath)
-		}
-
-		// Everything below is copying metadata from src to dst. All this metadata
-		// already shares an inode for hardlinks.
-		if isHardlink {
-			return nil
-		}
-
-		if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
-			return err
-		}
-
-		if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
-			return err
-		}
-
-		// We need to copy this attribute if it appears in an overlay upper layer, as
-		// this function is used to copy those. It is set by overlay if a directory
-		// is removed and then re-created and should not inherit anything from the
-		// same dir in the lower dir.
-		if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
-			return err
-		}
-
-		isSymlink := f.Mode()&os.ModeSymlink != 0
-
-		// There is no LChmod, so ignore mode for symlink. Also, this
-		// must happen after chown, as that can modify the file mode
-		if !isSymlink {
-			if err := os.Chmod(dstPath, f.Mode()); err != nil {
-				return err
-			}
-		}
-
-		// system.Chtimes doesn't support a NOFOLLOW flag atm
-		if !isSymlink {
-			aTime := time.Unix(stat.Atim.Sec, stat.Atim.Nsec)
-			mTime := time.Unix(stat.Mtim.Sec, stat.Mtim.Nsec)
-			if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
-				return err
-			}
-		} else {
-			ts := []syscall.Timespec{stat.Atim, stat.Mtim}
-			if err := system.LUtimesNano(dstPath, ts); err != nil {
-				return err
-			}
-		}
-		return nil
-	})
-	return err
-}
diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go
index 0d14b04..3db84bc 100644
--- a/daemon/graphdriver/overlay/overlay.go
+++ b/daemon/graphdriver/overlay/overlay.go
@@ -13,8 +13,10 @@
 	"strconv"
 
 	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/daemon/graphdriver/copy"
 	"github.com/docker/docker/daemon/graphdriver/overlayutils"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/fsutils"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/locker"
@@ -269,10 +271,7 @@
 
 	// Toplevel images are just a "root" dir
 	if parent == "" {
-		if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil {
-			return err
-		}
-		return nil
+		return idtools.MkdirAndChown(path.Join(dir, "root"), 0755, idtools.IDPair{rootUID, rootGID})
 	}
 
 	parentDir := d.dir(parent)
@@ -329,7 +328,7 @@
 		return err
 	}
 
-	return copyDir(parentUpperDir, upperDir, 0)
+	return copy.DirCopy(parentUpperDir, upperDir, copy.Content)
 }
 
 func (d *Driver) dir(id string) string {
@@ -344,21 +343,21 @@
 }
 
 // Get creates and mounts the required file system for the given id and returns the mount path.
-func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
+func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, err error) {
 	d.locker.Lock(id)
 	defer d.locker.Unlock(id)
 	dir := d.dir(id)
 	if _, err := os.Stat(dir); err != nil {
-		return "", err
+		return nil, err
 	}
 	// If id has a root, just return it
 	rootDir := path.Join(dir, "root")
 	if _, err := os.Stat(rootDir); err == nil {
-		return rootDir, nil
+		return containerfs.NewLocalContainerFS(rootDir), nil
 	}
 	mergedDir := path.Join(dir, "merged")
 	if count := d.ctr.Increment(mergedDir); count > 1 {
-		return mergedDir, nil
+		return containerfs.NewLocalContainerFS(mergedDir), nil
 	}
 	defer func() {
 		if err != nil {
@@ -369,7 +368,7 @@
 	}()
 	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
 	if err != nil {
-		return "", err
+		return nil, err
 	}
 	var (
 		lowerDir = path.Join(d.dir(string(lowerID)), "root")
@@ -378,18 +377,18 @@
 		opts     = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
 	)
 	if err := unix.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
-		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
+		return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
 	}
 	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
 	// user namespace requires this to move a directory from lower to upper.
 	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
 	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
-		return "", err
+		return nil, err
 	}
-	return mergedDir, nil
+	return containerfs.NewLocalContainerFS(mergedDir), nil
 }
 
 // Put unmounts the mount path created for the give id.
@@ -445,7 +444,7 @@
 		}
 	}()
 
-	if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil {
+	if err = copy.DirCopy(parentRootDir, tmpRootDir, copy.Hardlink); err != nil {
 		return 0, err
 	}
 
diff --git a/daemon/graphdriver/overlay2/check.go b/daemon/graphdriver/overlay2/check.go
index 851c409..f29630b 100644
--- a/daemon/graphdriver/overlay2/check.go
+++ b/daemon/graphdriver/overlay2/check.go
@@ -8,6 +8,7 @@
 	"os"
 	"path"
 	"path/filepath"
+	"syscall"
 
 	"github.com/docker/docker/pkg/system"
 	"github.com/pkg/errors"
@@ -15,10 +16,11 @@
 	"golang.org/x/sys/unix"
 )
 
-// hasOpaqueCopyUpBug checks whether the filesystem has a bug
+// doesSupportNativeDiff checks whether the filesystem has a bug
 // which copies up the opaque flag when copying up an opaque
-// directory. When this bug exists naive diff should be used.
-func hasOpaqueCopyUpBug(d string) error {
+// directory, or when the kernel enables CONFIG_OVERLAY_FS_REDIRECT_DIR.
+// When these exist naive diff should be used.
+func doesSupportNativeDiff(d string) error {
 	td, err := ioutil.TempDir(d, "opaque-bug-check")
 	if err != nil {
 		return err
@@ -29,10 +31,13 @@
 		}
 	}()
 
-	// Make directories l1/d, l2/d, l3, work, merged
+	// Make directories l1/d, l1/d1, l2/d, l3, work, merged
 	if err := os.MkdirAll(filepath.Join(td, "l1", "d"), 0755); err != nil {
 		return err
 	}
+	if err := os.MkdirAll(filepath.Join(td, "l1", "d1"), 0755); err != nil {
+		return err
+	}
 	if err := os.MkdirAll(filepath.Join(td, "l2", "d"), 0755); err != nil {
 		return err
 	}
@@ -75,5 +80,23 @@
 		return errors.New("opaque flag erroneously copied up, consider update to kernel 4.8 or later to fix")
 	}
 
+	// rename "d1" to "d2"
+	if err := os.Rename(filepath.Join(td, "merged", "d1"), filepath.Join(td, "merged", "d2")); err != nil {
+		// if rename failed with syscall.EXDEV, the kernel doesn't have CONFIG_OVERLAY_FS_REDIRECT_DIR enabled
+		if err.(*os.LinkError).Err == syscall.EXDEV {
+			return nil
+		}
+		return errors.Wrap(err, "failed to rename dir in merged directory")
+	}
+	// get the xattr of "d2"
+	xattrRedirect, err := system.Lgetxattr(filepath.Join(td, "l3", "d2"), "trusted.overlay.redirect")
+	if err != nil {
+		return errors.Wrap(err, "failed to read redirect flag on upper layer")
+	}
+
+	if string(xattrRedirect) == "d1" {
+		return errors.New("kernel has CONFIG_OVERLAY_FS_REDIRECT_DIR enabled")
+	}
+
 	return nil
 }
diff --git a/daemon/graphdriver/overlay2/mount.go b/daemon/graphdriver/overlay2/mount.go
index 77bff06..e59178c 100644
--- a/daemon/graphdriver/overlay2/mount.go
+++ b/daemon/graphdriver/overlay2/mount.go
@@ -49,18 +49,19 @@
 	output := bytes.NewBuffer(nil)
 	cmd.Stdout = output
 	cmd.Stderr = output
-
 	if err := cmd.Start(); err != nil {
+		w.Close()
 		return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
 	}
 	//write the options to the pipe for the untar exec to read
 	if err := json.NewEncoder(w).Encode(options); err != nil {
+		w.Close()
 		return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
 	}
 	w.Close()
 
 	if err := cmd.Wait(); err != nil {
-		return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output)
+		return fmt.Errorf("mountfrom re-exec error: %v: output: %v", err, output)
 	}
 	return nil
 }
diff --git a/daemon/graphdriver/overlay2/overlay.go b/daemon/graphdriver/overlay2/overlay.go
index 6b0d4be..0f252e6 100644
--- a/daemon/graphdriver/overlay2/overlay.go
+++ b/daemon/graphdriver/overlay2/overlay.go
@@ -23,6 +23,7 @@
 	"github.com/docker/docker/daemon/graphdriver/quota"
 	"github.com/docker/docker/pkg/archive"
 	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/directory"
 	"github.com/docker/docker/pkg/fsutils"
 	"github.com/docker/docker/pkg/idtools"
@@ -266,8 +267,8 @@
 
 func useNaiveDiff(home string) bool {
 	useNaiveDiffLock.Do(func() {
-		if err := hasOpaqueCopyUpBug(home); err != nil {
-			logrus.Warnf("Not using native diff for overlay2: %v", err)
+		if err := doesSupportNativeDiff(home); err != nil {
+			logrus.Warnf("Not using native diff for overlay2, this may cause degraded performance for building images: %v", err)
 			useNaiveDiffOnly = true
 		}
 	})
@@ -514,12 +515,12 @@
 }
 
 // Get creates and mounts the required file system for the given id and returns the mount path.
-func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
+func (d *Driver) Get(id, mountLabel string) (_ containerfs.ContainerFS, retErr error) {
 	d.locker.Lock(id)
 	defer d.locker.Unlock(id)
 	dir := d.dir(id)
 	if _, err := os.Stat(dir); err != nil {
-		return "", err
+		return nil, err
 	}
 
 	diffDir := path.Join(dir, "diff")
@@ -527,19 +528,21 @@
 	if err != nil {
 		// If no lower, just return diff directory
 		if os.IsNotExist(err) {
-			return diffDir, nil
+			return containerfs.NewLocalContainerFS(diffDir), nil
 		}
-		return "", err
+		return nil, err
 	}
 
 	mergedDir := path.Join(dir, "merged")
 	if count := d.ctr.Increment(mergedDir); count > 1 {
-		return mergedDir, nil
+		return containerfs.NewLocalContainerFS(mergedDir), nil
 	}
 	defer func() {
-		if err != nil {
+		if retErr != nil {
 			if c := d.ctr.Decrement(mergedDir); c <= 0 {
-				unix.Unmount(mergedDir, 0)
+				if mntErr := unix.Unmount(mergedDir, 0); mntErr != nil {
+					logrus.Errorf("error unmounting %v: %v", mergedDir, mntErr)
+				}
 			}
 		}
 	}()
@@ -574,7 +577,7 @@
 		opts = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
 		mountData = label.FormatMountLabel(opts, mountLabel)
 		if len(mountData) > pageSize {
-			return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
+			return nil, fmt.Errorf("cannot mount layer, mount label too large %d", len(mountData))
 		}
 
 		mount = func(source string, target string, mType string, flags uintptr, label string) error {
@@ -584,21 +587,21 @@
 	}
 
 	if err := mount("overlay", mountTarget, "overlay", 0, mountData); err != nil {
-		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
+		return nil, fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
 	}
 
 	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
 	// user namespace requires this to move a directory from lower to upper.
 	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
 
 	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
-		return "", err
+		return nil, err
 	}
 
-	return mergedDir, nil
+	return containerfs.NewLocalContainerFS(mergedDir), nil
 }
 
 // Put unmounts the mount path created for the give id.
diff --git a/daemon/graphdriver/proxy.go b/daemon/graphdriver/proxy.go
index 120afad..81ef872 100644
--- a/daemon/graphdriver/proxy.go
+++ b/daemon/graphdriver/proxy.go
@@ -7,6 +7,7 @@
 	"path/filepath"
 
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/pkg/plugins"
@@ -129,20 +130,20 @@
 	return nil
 }
 
-func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) {
+func (d *graphDriverProxy) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
 	args := &graphDriverRequest{
 		ID:         id,
 		MountLabel: mountLabel,
 	}
 	var ret graphDriverResponse
 	if err := d.p.Client().Call("GraphDriver.Get", args, &ret); err != nil {
-		return "", err
+		return nil, err
 	}
 	var err error
 	if ret.Err != "" {
 		err = errors.New(ret.Err)
 	}
-	return filepath.Join(d.p.BasePath(), ret.Dir), err
+	return containerfs.NewLocalContainerFS(filepath.Join(d.p.BasePath(), ret.Dir)), err
 }
 
 func (d *graphDriverProxy) Put(id string) error {
diff --git a/daemon/graphdriver/quota/projectquota.go b/daemon/graphdriver/quota/projectquota.go
index 0e70515..84e391a 100644
--- a/daemon/graphdriver/quota/projectquota.go
+++ b/daemon/graphdriver/quota/projectquota.go
@@ -47,6 +47,8 @@
 #ifndef Q_XGETPQUOTA
 #define Q_XGETPQUOTA QCMD(Q_XGETQUOTA, PRJQUOTA)
 #endif
+
+const int Q_XGETQSTAT_PRJQUOTA = QCMD(Q_XGETQSTAT, PRJQUOTA);
 */
 import "C"
 import (
@@ -56,10 +58,15 @@
 	"path/filepath"
 	"unsafe"
 
+	"errors"
+
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sys/unix"
 )
 
+// ErrQuotaNotSupported indicates that the filesystem does not support project quotas
+var ErrQuotaNotSupported = errors.New("Filesystem does not support or has not enabled quotas")
+
 // Quota limit params - currently we only control blocks hard limit
 type Quota struct {
 	Size uint64
@@ -97,6 +104,24 @@
 //
 func NewControl(basePath string) (*Control, error) {
 	//
+	// create backing filesystem device node
+	//
+	backingFsBlockDev, err := makeBackingFsDev(basePath)
+	if err != nil {
+		return nil, err
+	}
+
+	// check if we can call quotactl with project quotas
+	// as a mechanism to determine (early) if we have support
+	hasQuotaSupport, err := hasQuotaSupport(backingFsBlockDev)
+	if err != nil {
+		return nil, err
+	}
+	if !hasQuotaSupport {
+		return nil, ErrQuotaNotSupported
+	}
+
+	//
 	// Get project id of parent dir as minimal id to be used by driver
 	//
 	minProjectID, err := getProjectID(basePath)
@@ -106,14 +131,6 @@
 	minProjectID++
 
 	//
-	// create backing filesystem device node
-	//
-	backingFsBlockDev, err := makeBackingFsDev(basePath)
-	if err != nil {
-		return nil, err
-	}
-
-	//
 	// Test if filesystem supports project quotas by trying to set
 	// a quota on the first available project id
 	//
@@ -335,3 +352,23 @@
 
 	return backingFsBlockDev, nil
 }
+
+func hasQuotaSupport(backingFsBlockDev string) (bool, error) {
+	var cs = C.CString(backingFsBlockDev)
+	defer free(cs)
+	var qstat C.fs_quota_stat_t
+
+	_, _, errno := unix.Syscall6(unix.SYS_QUOTACTL, uintptr(C.Q_XGETQSTAT_PRJQUOTA), uintptr(unsafe.Pointer(cs)), 0, uintptr(unsafe.Pointer(&qstat)), 0, 0)
+	if errno == 0 && qstat.qs_flags&C.FS_QUOTA_PDQ_ENFD > 0 && qstat.qs_flags&C.FS_QUOTA_PDQ_ACCT > 0 {
+		return true, nil
+	}
+
+	switch errno {
+	// These are the known fatal errors; treat all other errors (ENOTTY, etc.) as "quota not supported"
+	case unix.EFAULT, unix.ENOENT, unix.ENOTBLK, unix.EPERM:
+	default:
+		return false, nil
+	}
+
+	return false, errno
+}
diff --git a/daemon/graphdriver/quota/projectquota_test.go b/daemon/graphdriver/quota/projectquota_test.go
new file mode 100644
index 0000000..2b47a58
--- /dev/null
+++ b/daemon/graphdriver/quota/projectquota_test.go
@@ -0,0 +1,161 @@
+// +build linux
+
+package quota
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/sys/unix"
+)
+
+// 10MB
+const testQuotaSize = 10 * 1024 * 1024
+const imageSize = 64 * 1024 * 1024
+
+func TestBlockDev(t *testing.T) {
+	mkfs, err := exec.LookPath("mkfs.xfs")
+	if err != nil {
+		t.Fatal("mkfs.xfs not installed")
+	}
+
+	// create a sparse image
+	imageFile, err := ioutil.TempFile("", "xfs-image")
+	if err != nil {
+		t.Fatal(err)
+	}
+	imageFileName := imageFile.Name()
+	defer os.Remove(imageFileName)
+	if _, err = imageFile.Seek(imageSize-1, 0); err != nil {
+		t.Fatal(err)
+	}
+	if _, err = imageFile.Write([]byte{0}); err != nil {
+		t.Fatal(err)
+	}
+	if err = imageFile.Close(); err != nil {
+		t.Fatal(err)
+	}
+
+	// The reason for disabling these options is that sometimes people run with a newer userspace
+	// than kernelspace
+	out, err := exec.Command(mkfs, "-m", "crc=0,finobt=0", imageFileName).CombinedOutput()
+	if len(out) > 0 {
+		t.Log(string(out))
+	}
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	runTest(t, "testBlockDevQuotaDisabled", wrapMountTest(imageFileName, false, testBlockDevQuotaDisabled))
+	runTest(t, "testBlockDevQuotaEnabled", wrapMountTest(imageFileName, true, testBlockDevQuotaEnabled))
+	runTest(t, "testSmallerThanQuota", wrapMountTest(imageFileName, true, wrapQuotaTest(testSmallerThanQuota)))
+	runTest(t, "testBiggerThanQuota", wrapMountTest(imageFileName, true, wrapQuotaTest(testBiggerThanQuota)))
+	runTest(t, "testRetrieveQuota", wrapMountTest(imageFileName, true, wrapQuotaTest(testRetrieveQuota)))
+}
+
+func runTest(t *testing.T, testName string, testFunc func(*testing.T)) {
+	if success := t.Run(testName, testFunc); !success {
+		out, _ := exec.Command("dmesg").CombinedOutput()
+		t.Log(string(out))
+	}
+}
+
+func wrapMountTest(imageFileName string, enableQuota bool, testFunc func(t *testing.T, mountPoint, backingFsDev string)) func(*testing.T) {
+	return func(t *testing.T) {
+		mountOptions := "loop"
+
+		if enableQuota {
+			mountOptions = mountOptions + ",prjquota"
+		}
+
+		// create a mountPoint
+		mountPoint, err := ioutil.TempDir("", "xfs-mountPoint")
+		if err != nil {
+			t.Fatal(err)
+		}
+		defer os.RemoveAll(mountPoint)
+
+		out, err := exec.Command("mount", "-o", mountOptions, imageFileName, mountPoint).CombinedOutput()
+		if len(out) > 0 {
+			t.Log(string(out))
+		}
+		if err != nil {
+			t.Fatal("mount failed")
+		}
+
+		defer func() {
+			if err := unix.Unmount(mountPoint, 0); err != nil {
+				t.Fatal(err)
+			}
+		}()
+
+		backingFsDev, err := makeBackingFsDev(mountPoint)
+		require.NoError(t, err)
+
+		testFunc(t, mountPoint, backingFsDev)
+	}
+}
+
+func testBlockDevQuotaDisabled(t *testing.T, mountPoint, backingFsDev string) {
+	hasSupport, err := hasQuotaSupport(backingFsDev)
+	require.NoError(t, err)
+	assert.False(t, hasSupport)
+}
+
+func testBlockDevQuotaEnabled(t *testing.T, mountPoint, backingFsDev string) {
+	hasSupport, err := hasQuotaSupport(backingFsDev)
+	require.NoError(t, err)
+	assert.True(t, hasSupport)
+}
+
+func wrapQuotaTest(testFunc func(t *testing.T, ctrl *Control, mountPoint, testDir, testSubDir string)) func(t *testing.T, mountPoint, backingFsDev string) {
+	return func(t *testing.T, mountPoint, backingFsDev string) {
+		testDir, err := ioutil.TempDir(mountPoint, "per-test")
+		require.NoError(t, err)
+		defer os.RemoveAll(testDir)
+
+		ctrl, err := NewControl(testDir)
+		require.NoError(t, err)
+
+		testSubDir, err := ioutil.TempDir(testDir, "quota-test")
+		require.NoError(t, err)
+		testFunc(t, ctrl, mountPoint, testDir, testSubDir)
+	}
+
+}
+
+func testSmallerThanQuota(t *testing.T, ctrl *Control, homeDir, testDir, testSubDir string) {
+	require.NoError(t, ctrl.SetQuota(testSubDir, Quota{testQuotaSize}))
+	smallerThanQuotaFile := filepath.Join(testSubDir, "smaller-than-quota")
+	require.NoError(t, ioutil.WriteFile(smallerThanQuotaFile, make([]byte, testQuotaSize/2), 0644))
+	require.NoError(t, os.Remove(smallerThanQuotaFile))
+}
+
+func testBiggerThanQuota(t *testing.T, ctrl *Control, homeDir, testDir, testSubDir string) {
+	// Make sure the quota is being enforced
+	// TODO: When we implement this under EXT4, we need to shed CAP_SYS_RESOURCE, otherwise
+	// we're able to violate quota without issue
+	require.NoError(t, ctrl.SetQuota(testSubDir, Quota{testQuotaSize}))
+
+	biggerThanQuotaFile := filepath.Join(testSubDir, "bigger-than-quota")
+	err := ioutil.WriteFile(biggerThanQuotaFile, make([]byte, testQuotaSize+1), 0644)
+	require.Error(t, err)
+	if err == io.ErrShortWrite {
+		require.NoError(t, os.Remove(biggerThanQuotaFile))
+	}
+}
+
+func testRetrieveQuota(t *testing.T, ctrl *Control, homeDir, testDir, testSubDir string) {
+	// Validate that we can retrieve quota
+	require.NoError(t, ctrl.SetQuota(testSubDir, Quota{testQuotaSize}))
+
+	var q Quota
+	require.NoError(t, ctrl.GetQuota(testSubDir, &q))
+	assert.EqualValues(t, testQuotaSize, q.Size)
+}
diff --git a/daemon/graphdriver/register/register_devicemapper.go b/daemon/graphdriver/register/register_devicemapper.go
index bb2e9ef..09dfb71 100644
--- a/daemon/graphdriver/register/register_devicemapper.go
+++ b/daemon/graphdriver/register/register_devicemapper.go
@@ -1,4 +1,4 @@
-// +build !exclude_graphdriver_devicemapper,linux
+// +build !exclude_graphdriver_devicemapper,!static_build,linux
 
 package register
 
diff --git a/daemon/graphdriver/register/register_zfs.go b/daemon/graphdriver/register/register_zfs.go
index 8f34e35..8c31c41 100644
--- a/daemon/graphdriver/register/register_zfs.go
+++ b/daemon/graphdriver/register/register_zfs.go
@@ -1,4 +1,4 @@
-// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris
+// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd
 
 package register
 
diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go
index 15a4de3..0482dcc 100644
--- a/daemon/graphdriver/vfs/driver.go
+++ b/daemon/graphdriver/vfs/driver.go
@@ -7,6 +7,7 @@
 
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/chrootarchive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/system"
 	"github.com/opencontainers/selinux/go-selinux/label"
@@ -94,7 +95,7 @@
 	if err != nil {
 		return fmt.Errorf("%s: %s", parent, err)
 	}
-	return CopyWithTar(parentDir, dir)
+	return CopyWithTar(parentDir.Path(), dir)
 }
 
 func (d *Driver) dir(id string) string {
@@ -103,21 +104,18 @@
 
 // Remove deletes the content from the directory for a given id.
 func (d *Driver) Remove(id string) error {
-	if err := system.EnsureRemoveAll(d.dir(id)); err != nil {
-		return err
-	}
-	return nil
+	return system.EnsureRemoveAll(d.dir(id))
 }
 
 // Get returns the directory for the given id.
-func (d *Driver) Get(id, mountLabel string) (string, error) {
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
 	dir := d.dir(id)
 	if st, err := os.Stat(dir); err != nil {
-		return "", err
+		return nil, err
 	} else if !st.IsDir() {
-		return "", fmt.Errorf("%s: not a directory", dir)
+		return nil, fmt.Errorf("%s: not a directory", dir)
 	}
-	return dir, nil
+	return containerfs.NewLocalContainerFS(dir), nil
 }
 
 // Put is a noop for vfs that return nil for the error, since this driver has no runtime resources to clean up.
diff --git a/daemon/graphdriver/windows/windows.go b/daemon/graphdriver/windows/windows.go
index 4411405..cc9ca52 100644
--- a/daemon/graphdriver/windows/windows.go
+++ b/daemon/graphdriver/windows/windows.go
@@ -26,6 +26,7 @@
 	"github.com/Microsoft/hcsshim"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/longpath"
@@ -153,19 +154,8 @@
 	}
 }
 
-// panicIfUsedByLcow does exactly what it says.
-// TODO @jhowardmsft - this is a temporary measure for the bring-up of
-// Linux containers on Windows. It is a failsafe to ensure that the right
-// graphdriver is used.
-func panicIfUsedByLcow() {
-	if system.LCOWSupported() {
-		panic("inconsistency - windowsfilter graphdriver should not be used when in LCOW mode")
-	}
-}
-
 // Exists returns true if the given id is registered with this driver.
 func (d *Driver) Exists(id string) bool {
-	panicIfUsedByLcow()
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return false
@@ -180,7 +170,6 @@
 // CreateReadWrite creates a layer that is writable for use as a container
 // file system.
 func (d *Driver) CreateReadWrite(id, parent string, opts *graphdriver.CreateOpts) error {
-	panicIfUsedByLcow()
 	if opts != nil {
 		return d.create(id, parent, opts.MountLabel, false, opts.StorageOpt)
 	}
@@ -189,7 +178,6 @@
 
 // Create creates a new read-only layer with the given id.
 func (d *Driver) Create(id, parent string, opts *graphdriver.CreateOpts) error {
-	panicIfUsedByLcow()
 	if opts != nil {
 		return d.create(id, parent, opts.MountLabel, true, opts.StorageOpt)
 	}
@@ -273,7 +261,6 @@
 
 // Remove unmounts and removes the dir information.
 func (d *Driver) Remove(id string) error {
-	panicIfUsedByLcow()
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return err
@@ -354,36 +341,35 @@
 }
 
 // Get returns the rootfs path for the id. This will mount the dir at its given path.
-func (d *Driver) Get(id, mountLabel string) (string, error) {
-	panicIfUsedByLcow()
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
 	logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel)
 	var dir string
 
 	rID, err := d.resolveID(id)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
 	if count := d.ctr.Increment(rID); count > 1 {
-		return d.cache[rID], nil
+		return containerfs.NewLocalContainerFS(d.cache[rID]), nil
 	}
 
 	// Getting the layer paths must be done outside of the lock.
 	layerChain, err := d.getLayerChain(rID)
 	if err != nil {
 		d.ctr.Decrement(rID)
-		return "", err
+		return nil, err
 	}
 
 	if err := hcsshim.ActivateLayer(d.info, rID); err != nil {
 		d.ctr.Decrement(rID)
-		return "", err
+		return nil, err
 	}
 	if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil {
 		d.ctr.Decrement(rID)
 		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
 			logrus.Warnf("Failed to Deactivate %s: %s", id, err)
 		}
-		return "", err
+		return nil, err
 	}
 
 	mountPath, err := hcsshim.GetLayerMountPath(d.info, rID)
@@ -395,7 +381,7 @@
 		if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil {
 			logrus.Warnf("Failed to Deactivate %s: %s", id, err)
 		}
-		return "", err
+		return nil, err
 	}
 	d.cacheMu.Lock()
 	d.cache[rID] = mountPath
@@ -409,12 +395,11 @@
 		dir = d.dir(id)
 	}
 
-	return dir, nil
+	return containerfs.NewLocalContainerFS(dir), nil
 }
 
 // Put adds a new layer to the driver.
 func (d *Driver) Put(id string) error {
-	panicIfUsedByLcow()
 	logrus.Debugf("WindowsGraphDriver Put() id %s", id)
 
 	rID, err := d.resolveID(id)
@@ -473,7 +458,6 @@
 // layer and its parent layer which may be "".
 // The layer should be mounted when calling this function
 func (d *Driver) Diff(id, parent string) (_ io.ReadCloser, err error) {
-	panicIfUsedByLcow()
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return
@@ -510,7 +494,6 @@
 // and its parent layer. If parent is "", then all changes will be ADD changes.
 // The layer should not be mounted when calling this function.
 func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
-	panicIfUsedByLcow()
 	rID, err := d.resolveID(id)
 	if err != nil {
 		return nil, err
@@ -566,7 +549,6 @@
 // new layer in bytes.
 // The layer should not be mounted when calling this function
 func (d *Driver) ApplyDiff(id, parent string, diff io.Reader) (int64, error) {
-	panicIfUsedByLcow()
 	var layerChain []string
 	if parent != "" {
 		rPId, err := d.resolveID(parent)
@@ -601,7 +583,6 @@
 // and its parent and returns the size in bytes of the changes
 // relative to its base filesystem directory.
 func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
-	panicIfUsedByLcow()
 	rPId, err := d.resolveID(parent)
 	if err != nil {
 		return
@@ -618,12 +599,11 @@
 	}
 	defer d.Put(id)
 
-	return archive.ChangesSize(layerFs, changes), nil
+	return archive.ChangesSize(layerFs.Path(), changes), nil
 }
 
 // GetMetadata returns custom driver information.
 func (d *Driver) GetMetadata(id string) (map[string]string, error) {
-	panicIfUsedByLcow()
 	m := make(map[string]string)
 	m["dir"] = d.dir(id)
 	return m, nil
@@ -926,7 +906,6 @@
 // DiffGetter returns a FileGetCloser that can read files from the directory that
 // contains files for the layer differences. Used for direct access for tar-split.
 func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
-	panicIfUsedByLcow()
 	id, err := d.resolveID(id)
 	if err != nil {
 		return nil, err
diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go
index 729099a..52e2aa1 100644
--- a/daemon/graphdriver/zfs/zfs.go
+++ b/daemon/graphdriver/zfs/zfs.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris
+// +build linux freebsd
 
 package zfs
 
@@ -13,6 +13,7 @@
 	"time"
 
 	"github.com/docker/docker/daemon/graphdriver"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/parsers"
@@ -356,10 +357,10 @@
 }
 
 // Get returns the mountpoint for the given id after creating the target directories if necessary.
-func (d *Driver) Get(id, mountLabel string) (string, error) {
+func (d *Driver) Get(id, mountLabel string) (containerfs.ContainerFS, error) {
 	mountpoint := d.mountPath(id)
 	if count := d.ctr.Increment(mountpoint); count > 1 {
-		return mountpoint, nil
+		return containerfs.NewLocalContainerFS(mountpoint), nil
 	}
 
 	filesystem := d.zfsPath(id)
@@ -369,17 +370,17 @@
 	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
 	if err != nil {
 		d.ctr.Decrement(mountpoint)
-		return "", err
+		return nil, err
 	}
 	// Create the target directories if they don't exist
 	if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil {
 		d.ctr.Decrement(mountpoint)
-		return "", err
+		return nil, err
 	}
 
 	if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil {
 		d.ctr.Decrement(mountpoint)
-		return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
+		return nil, fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err)
 	}
 
 	// this could be our first mount after creation of the filesystem, and the root dir may still have root
@@ -387,10 +388,10 @@
 	if err := os.Chown(mountpoint, rootUID, rootGID); err != nil {
 		mount.Unmount(mountpoint)
 		d.ctr.Decrement(mountpoint)
-		return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
+		return nil, fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err)
 	}
 
-	return mountpoint, nil
+	return containerfs.NewLocalContainerFS(mountpoint), nil
 }
 
 // Put removes the existing mountpoint for the given id if it exists.
@@ -416,5 +417,5 @@
 func (d *Driver) Exists(id string) bool {
 	d.Lock()
 	defer d.Unlock()
-	return d.filesystemsCache[d.zfsPath(id)] == true
+	return d.filesystemsCache[d.zfsPath(id)]
 }
diff --git a/daemon/graphdriver/zfs/zfs_solaris.go b/daemon/graphdriver/zfs/zfs_solaris.go
deleted file mode 100644
index d636422..0000000
--- a/daemon/graphdriver/zfs/zfs_solaris.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// +build solaris,cgo
-
-package zfs
-
-/*
-#include <sys/statvfs.h>
-#include <stdlib.h>
-
-static inline struct statvfs *getstatfs(char *s) {
-        struct statvfs *buf;
-        int err;
-        buf = (struct statvfs *)malloc(sizeof(struct statvfs));
-        err = statvfs(s, buf);
-        return buf;
-}
-*/
-import "C"
-import (
-	"path/filepath"
-	"strings"
-	"unsafe"
-
-	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/sirupsen/logrus"
-)
-
-func checkRootdirFs(rootdir string) error {
-
-	cs := C.CString(filepath.Dir(rootdir))
-	buf := C.getstatfs(cs)
-
-	// on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... ]
-	if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) ||
-		(buf.f_basetype[3] != 0) {
-		logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir)
-		C.free(unsafe.Pointer(buf))
-		return graphdriver.ErrPrerequisites
-	}
-
-	C.free(unsafe.Pointer(buf))
-	C.free(unsafe.Pointer(cs))
-	return nil
-}
-
-/* rootfs is introduced to comply with the OCI spec
-which states that root filesystem must be mounted at <CID>/rootfs/ instead of <CID>/
-*/
-func getMountpoint(id string) string {
-	maxlen := 12
-
-	// we need to preserve filesystem suffix
-	suffix := strings.SplitN(id, "-", 2)
-
-	if len(suffix) > 1 {
-		return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root")
-	}
-
-	return filepath.Join(id[:maxlen], "rootfs", "root")
-}
diff --git a/daemon/graphdriver/zfs/zfs_unsupported.go b/daemon/graphdriver/zfs/zfs_unsupported.go
index ce8daad..643b169 100644
--- a/daemon/graphdriver/zfs/zfs_unsupported.go
+++ b/daemon/graphdriver/zfs/zfs_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!freebsd,!solaris
+// +build !linux,!freebsd
 
 package zfs
 
diff --git a/daemon/image.go b/daemon/image.go
index 23670c4..6e90429 100644
--- a/daemon/image.go
+++ b/daemon/image.go
@@ -24,9 +24,9 @@
 
 func (e errImageDoesNotExist) NotFound() {}
 
-// GetImageIDAndPlatform returns an image ID and platform corresponding to the image referred to by
+// GetImageIDAndOS returns an image ID and operating system corresponding to the image referred to by
 // refOrID.
-func (daemon *Daemon) GetImageIDAndPlatform(refOrID string) (image.ID, string, error) {
+func (daemon *Daemon) GetImageIDAndOS(refOrID string) (image.ID, string, error) {
 	ref, err := reference.ParseAnyReference(refOrID)
 	if err != nil {
 		return "", "", validationError{err}
@@ -47,16 +47,16 @@
 	}
 
 	if digest, err := daemon.referenceStore.Get(namedRef); err == nil {
-		// Search the image stores to get the platform, defaulting to host OS.
-		imagePlatform := runtime.GOOS
+		// Search the image stores to get the operating system, defaulting to host OS.
+		imageOS := runtime.GOOS
 		id := image.IDFromDigest(digest)
-		for platform := range daemon.stores {
-			if img, err := daemon.stores[platform].imageStore.Get(id); err == nil {
-				imagePlatform = img.Platform()
+		for os := range daemon.stores {
+			if img, err := daemon.stores[os].imageStore.Get(id); err == nil {
+				imageOS = img.OperatingSystem()
 				break
 			}
 		}
-		return id, imagePlatform, nil
+		return id, imageOS, nil
 	}
 
 	// deprecated: repo:shortid https://github.com/docker/docker/pull/799
@@ -75,9 +75,9 @@
 	}
 
 	// Search based on ID
-	for platform := range daemon.stores {
-		if id, err := daemon.stores[platform].imageStore.Search(refOrID); err == nil {
-			return id, platform, nil
+	for os := range daemon.stores {
+		if id, err := daemon.stores[os].imageStore.Search(refOrID); err == nil {
+			return id, os, nil
 		}
 	}
 
@@ -86,9 +86,9 @@
 
 // GetImage returns an image corresponding to the image referred to by refOrID.
 func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) {
-	imgID, platform, err := daemon.GetImageIDAndPlatform(refOrID)
+	imgID, os, err := daemon.GetImageIDAndOS(refOrID)
 	if err != nil {
 		return nil, err
 	}
-	return daemon.stores[platform].imageStore.Get(imgID)
+	return daemon.stores[os].imageStore.Get(imgID)
 }
diff --git a/daemon/image_delete.go b/daemon/image_delete.go
index 9873924..8e51931 100644
--- a/daemon/image_delete.go
+++ b/daemon/image_delete.go
@@ -65,7 +65,7 @@
 	start := time.Now()
 	records := []types.ImageDeleteResponseItem{}
 
-	imgID, platform, err := daemon.GetImageIDAndPlatform(imageRef)
+	imgID, os, err := daemon.GetImageIDAndOS(imageRef)
 	if err != nil {
 		return nil, err
 	}
@@ -94,7 +94,7 @@
 			return nil, err
 		}
 
-		parsedRef, err = daemon.removeImageRef(platform, parsedRef)
+		parsedRef, err = daemon.removeImageRef(os, parsedRef)
 		if err != nil {
 			return nil, err
 		}
@@ -122,7 +122,7 @@
 				remainingRefs := []reference.Named{}
 				for _, repoRef := range repoRefs {
 					if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() {
-						if _, err := daemon.removeImageRef(platform, repoRef); err != nil {
+						if _, err := daemon.removeImageRef(os, repoRef); err != nil {
 							return records, err
 						}
 
@@ -152,12 +152,12 @@
 			if !force {
 				c |= conflictSoft &^ conflictActiveReference
 			}
-			if conflict := daemon.checkImageDeleteConflict(imgID, platform, c); conflict != nil {
+			if conflict := daemon.checkImageDeleteConflict(imgID, os, c); conflict != nil {
 				return nil, conflict
 			}
 
 			for _, repoRef := range repoRefs {
-				parsedRef, err := daemon.removeImageRef(platform, repoRef)
+				parsedRef, err := daemon.removeImageRef(os, repoRef)
 				if err != nil {
 					return nil, err
 				}
@@ -170,7 +170,7 @@
 		}
 	}
 
-	if err := daemon.imageDeleteHelper(imgID, platform, &records, force, prune, removedRepositoryRef); err != nil {
+	if err := daemon.imageDeleteHelper(imgID, os, &records, force, prune, removedRepositoryRef); err != nil {
 		return nil, err
 	}
 
diff --git a/daemon/image_tag.go b/daemon/image_tag.go
index 0c1d761..cfac6d6 100644
--- a/daemon/image_tag.go
+++ b/daemon/image_tag.go
@@ -8,7 +8,7 @@
 // TagImage creates the tag specified by newTag, pointing to the image named
 // imageName (alternatively, imageName can also be an image ID).
 func (daemon *Daemon) TagImage(imageName, repository, tag string) error {
-	imageID, platform, err := daemon.GetImageIDAndPlatform(imageName)
+	imageID, os, err := daemon.GetImageIDAndOS(imageName)
 	if err != nil {
 		return err
 	}
@@ -23,16 +23,16 @@
 		}
 	}
 
-	return daemon.TagImageWithReference(imageID, platform, newTag)
+	return daemon.TagImageWithReference(imageID, os, newTag)
 }
 
 // TagImageWithReference adds the given reference to the image ID provided.
-func (daemon *Daemon) TagImageWithReference(imageID image.ID, platform string, newTag reference.Named) error {
+func (daemon *Daemon) TagImageWithReference(imageID image.ID, os string, newTag reference.Named) error {
 	if err := daemon.referenceStore.AddTag(newTag, imageID.Digest(), true); err != nil {
 		return err
 	}
 
-	if err := daemon.stores[platform].imageStore.SetLastUpdated(imageID); err != nil {
+	if err := daemon.stores[os].imageStore.SetLastUpdated(imageID); err != nil {
 		return err
 	}
 	daemon.LogImageEvent(imageID.String(), reference.FamiliarString(newTag), "tag")
diff --git a/daemon/images.go b/daemon/images.go
index 9be6e0e..7cd2c62 100644
--- a/daemon/images.go
+++ b/daemon/images.go
@@ -36,7 +36,7 @@
 
 // Map returns a map of all images in the ImageStore
 func (daemon *Daemon) Map() map[image.ID]*image.Image {
-	// TODO @jhowardmsft LCOW. This will need  work to enumerate the stores for all platforms.
+	// TODO @jhowardmsft LCOW. This can be removed when imagestores are coalesced
 	platform := runtime.GOOS
 	if system.LCOWSupported() {
 		platform = "linux"
@@ -51,7 +51,7 @@
 // the heads.
 func (daemon *Daemon) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) ([]*types.ImageSummary, error) {
 
-	// TODO @jhowardmsft LCOW. This will need  work to enumerate the stores for all platforms.
+	// TODO @jhowardmsft LCOW. This can be removed when imagestores are coalesced
 	platform := runtime.GOOS
 	if system.LCOWSupported() {
 		platform = "linux"
@@ -67,7 +67,7 @@
 		return nil, err
 	}
 
-	if imageFilters.Include("dangling") {
+	if imageFilters.Contains("dangling") {
 		if imageFilters.ExactMatch("dangling", "true") {
 			danglingOnly = true
 		} else if !imageFilters.ExactMatch("dangling", "false") {
@@ -116,7 +116,7 @@
 			}
 		}
 
-		if imageFilters.Include("label") {
+		if imageFilters.Contains("label") {
 			// Very old image that do not have image.Config (or even labels)
 			if img.Config == nil {
 				continue
@@ -150,7 +150,7 @@
 		newImage := newImage(img, size)
 
 		for _, ref := range daemon.referenceStore.References(id.Digest()) {
-			if imageFilters.Include("reference") {
+			if imageFilters.Contains("reference") {
 				var found bool
 				var matchErr error
 				for _, pattern := range imageFilters.Get("reference") {
@@ -173,11 +173,11 @@
 		if newImage.RepoDigests == nil && newImage.RepoTags == nil {
 			if all || len(daemon.stores[platform].imageStore.Children(id)) == 0 {
 
-				if imageFilters.Include("dangling") && !danglingOnly {
+				if imageFilters.Contains("dangling") && !danglingOnly {
 					//dangling=false case, so dangling image is not needed
 					continue
 				}
-				if imageFilters.Include("reference") { // skip images with no references if filtering by reference
+				if imageFilters.Contains("reference") { // skip images with no references if filtering by reference
 					continue
 				}
 				newImage.RepoDigests = []string{"<none>@<none>"}
@@ -273,7 +273,7 @@
 	var parentImg *image.Image
 	var parentChainID layer.ChainID
 	if len(parent) != 0 {
-		parentImg, err = daemon.stores[img.Platform()].imageStore.Get(image.ID(parent))
+		parentImg, err = daemon.stores[img.OperatingSystem()].imageStore.Get(image.ID(parent))
 		if err != nil {
 			return "", errors.Wrap(err, "error getting specified parent layer")
 		}
@@ -283,11 +283,11 @@
 		parentImg = &image.Image{RootFS: rootFS}
 	}
 
-	l, err := daemon.stores[img.Platform()].layerStore.Get(img.RootFS.ChainID())
+	l, err := daemon.stores[img.OperatingSystem()].layerStore.Get(img.RootFS.ChainID())
 	if err != nil {
 		return "", errors.Wrap(err, "error getting image layer")
 	}
-	defer daemon.stores[img.Platform()].layerStore.Release(l)
+	defer daemon.stores[img.OperatingSystem()].layerStore.Release(l)
 
 	ts, err := l.TarStreamFrom(parentChainID)
 	if err != nil {
@@ -295,18 +295,16 @@
 	}
 	defer ts.Close()
 
-	newL, err := daemon.stores[img.Platform()].layerStore.Register(ts, parentChainID, layer.Platform(img.Platform()))
+	newL, err := daemon.stores[img.OperatingSystem()].layerStore.Register(ts, parentChainID, layer.OS(img.OperatingSystem()))
 	if err != nil {
 		return "", errors.Wrap(err, "error registering layer")
 	}
-	defer daemon.stores[img.Platform()].layerStore.Release(newL)
+	defer daemon.stores[img.OperatingSystem()].layerStore.Release(newL)
 
-	var newImage image.Image
-	newImage = *img
+	newImage := *img
 	newImage.RootFS = nil
 
-	var rootFS image.RootFS
-	rootFS = *parentImg.RootFS
+	rootFS := *parentImg.RootFS
 	rootFS.DiffIDs = append(rootFS.DiffIDs, newL.DiffID())
 	newImage.RootFS = &rootFS
 
@@ -336,7 +334,7 @@
 		return "", errors.Wrap(err, "error marshalling image config")
 	}
 
-	newImgID, err := daemon.stores[img.Platform()].imageStore.Create(b)
+	newImgID, err := daemon.stores[img.OperatingSystem()].imageStore.Create(b)
 	if err != nil {
 		return "", errors.Wrap(err, "error creating new image after squash")
 	}
diff --git a/daemon/import.go b/daemon/import.go
index e58d912..3a25bb8 100644
--- a/daemon/import.go
+++ b/daemon/import.go
@@ -26,16 +26,16 @@
 // inConfig (if src is "-"), or from a URI specified in src. Progress output is
 // written to outStream. Repository and tag names can optionally be given in
 // the repo and tag arguments, respectively.
-func (daemon *Daemon) ImportImage(src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error {
+func (daemon *Daemon) ImportImage(src string, repository, os string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error {
 	var (
 		rc     io.ReadCloser
 		resp   *http.Response
 		newRef reference.Named
 	)
 
-	// Default the platform if not supplied.
-	if platform == "" {
-		platform = runtime.GOOS
+	// Default the operating system if not supplied.
+	if os == "" {
+		os = runtime.GOOS
 	}
 
 	if repository != "" {
@@ -90,11 +90,11 @@
 	if err != nil {
 		return err
 	}
-	l, err := daemon.stores[platform].layerStore.Register(inflatedLayerData, "", layer.Platform(platform))
+	l, err := daemon.stores[os].layerStore.Register(inflatedLayerData, "", layer.OS(os))
 	if err != nil {
 		return err
 	}
-	defer layer.ReleaseAndLog(daemon.stores[platform].layerStore, l)
+	defer layer.ReleaseAndLog(daemon.stores[os].layerStore, l)
 
 	created := time.Now().UTC()
 	imgConfig, err := json.Marshal(&image.Image{
@@ -102,7 +102,7 @@
 			DockerVersion: dockerversion.Version,
 			Config:        config,
 			Architecture:  runtime.GOARCH,
-			OS:            platform,
+			OS:            os,
 			Created:       created,
 			Comment:       msg,
 		},
@@ -119,14 +119,14 @@
 		return err
 	}
 
-	id, err := daemon.stores[platform].imageStore.Create(imgConfig)
+	id, err := daemon.stores[os].imageStore.Create(imgConfig)
 	if err != nil {
 		return err
 	}
 
 	// FIXME: connect with commit code and call refstore directly
 	if newRef != nil {
-		if err := daemon.TagImageWithReference(id, platform, newRef); err != nil {
+		if err := daemon.TagImageWithReference(id, os, newRef); err != nil {
 			return err
 		}
 	}
diff --git a/daemon/info_unix.go b/daemon/info_unix.go
index f43af62..9433434 100644
--- a/daemon/info_unix.go
+++ b/daemon/info_unix.go
@@ -28,16 +28,8 @@
 	v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName()
 	v.InitBinary = daemon.configStore.GetInitPath()
 
-	v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID
-	if sv, err := daemon.containerd.GetServerVersion(context.Background()); err == nil {
-		v.ContainerdCommit.ID = sv.Revision
-	} else {
-		logrus.Warnf("failed to retrieve containerd version: %v", err)
-		v.ContainerdCommit.ID = "N/A"
-	}
-
 	v.RuncCommit.Expected = dockerversion.RuncCommitID
-	defaultRuntimeBinary := daemon.configStore.GetRuntime(daemon.configStore.GetDefaultRuntimeName()).Path
+	defaultRuntimeBinary := daemon.configStore.GetRuntime(v.DefaultRuntime).Path
 	if rv, err := exec.Command(defaultRuntimeBinary, "--version").Output(); err == nil {
 		parts := strings.Split(strings.TrimSpace(string(rv)), "\n")
 		if len(parts) == 3 {
@@ -56,6 +48,14 @@
 		v.RuncCommit.ID = "N/A"
 	}
 
+	v.ContainerdCommit.Expected = dockerversion.ContainerdCommitID
+	if rv, err := daemon.containerd.Version(context.Background()); err == nil {
+		v.ContainerdCommit.ID = rv.Revision
+	} else {
+		logrus.Warnf("failed to retrieve containerd version: %v", err)
+		v.ContainerdCommit.ID = "N/A"
+	}
+
 	defaultInitBinary := daemon.configStore.GetInitPath()
 	if rv, err := exec.Command(defaultInitBinary, "--version").Output(); err == nil {
 		ver, err := parseInitVersion(string(rv))
diff --git a/daemon/initlayer/setup_solaris.go b/daemon/initlayer/setup_solaris.go
deleted file mode 100644
index 66d53f0..0000000
--- a/daemon/initlayer/setup_solaris.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build solaris,cgo
-
-package initlayer
-
-// Setup populates a directory with mountpoints suitable
-// for bind-mounting dockerinit into the container. The mountpoint is simply an
-// empty file at /.dockerinit
-//
-// This extra layer is used by all containers as the top-most ro layer. It protects
-// the container from unwanted side-effects on the rw layer.
-func Setup(initLayer string, rootUID, rootGID int) error {
-	return nil
-}
diff --git a/daemon/initlayer/setup_unix.go b/daemon/initlayer/setup_unix.go
index e26d3a0..a02cea6 100644
--- a/daemon/initlayer/setup_unix.go
+++ b/daemon/initlayer/setup_unix.go
@@ -7,6 +7,7 @@
 	"path/filepath"
 	"strings"
 
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"golang.org/x/sys/unix"
 )
@@ -16,7 +17,10 @@
 //
 // This extra layer is used by all containers as the top-most ro layer. It protects
 // the container from unwanted side-effects on the rw layer.
-func Setup(initLayer string, rootIDs idtools.IDPair) error {
+func Setup(initLayerFs containerfs.ContainerFS, rootIDs idtools.IDPair) error {
+	// Since all paths are local to the container, we can just extract initLayerFs.Path()
+	initLayer := initLayerFs.Path()
+
 	for pth, typ := range map[string]string{
 		"/dev/pts":         "dir",
 		"/dev/shm":         "dir",
diff --git a/daemon/initlayer/setup_windows.go b/daemon/initlayer/setup_windows.go
index 2b22f58..b47563e 100644
--- a/daemon/initlayer/setup_windows.go
+++ b/daemon/initlayer/setup_windows.go
@@ -3,6 +3,7 @@
 package initlayer
 
 import (
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 )
 
@@ -12,6 +13,6 @@
 //
 // This extra layer is used by all containers as the top-most ro layer. It protects
 // the container from unwanted side-effects on the rw layer.
-func Setup(initLayer string, rootIDs idtools.IDPair) error {
+func Setup(initLayer containerfs.ContainerFS, rootIDs idtools.IDPair) error {
 	return nil
 }
diff --git a/daemon/inspect.go b/daemon/inspect.go
index 98f291c..8b0f109 100644
--- a/daemon/inspect.go
+++ b/daemon/inspect.go
@@ -171,7 +171,7 @@
 		Name:         container.Name,
 		RestartCount: container.RestartCount,
 		Driver:       container.Driver,
-		Platform:     container.Platform,
+		Platform:     container.OS,
 		MountLabel:   container.MountLabel,
 		ProcessLabel: container.ProcessLabel,
 		ExecIDs:      container.GetExecIDs(),
diff --git a/daemon/inspect_unix.go b/daemon/inspect_unix.go
index bd28481..f073695 100644
--- a/daemon/inspect_unix.go
+++ b/daemon/inspect_unix.go
@@ -1,4 +1,4 @@
-// +build !windows,!solaris
+// +build !windows
 
 package daemon
 
diff --git a/daemon/kill.go b/daemon/kill.go
index 4398151..a230eaa 100644
--- a/daemon/kill.go
+++ b/daemon/kill.go
@@ -9,6 +9,7 @@
 	"time"
 
 	containerpkg "github.com/docker/docker/container"
+	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/pkg/signal"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -108,7 +109,7 @@
 
 	if unpause {
 		// above kill signal will be sent once resume is finished
-		if err := daemon.containerd.Resume(container.ID); err != nil {
+		if err := daemon.containerd.Resume(context.Background(), container.ID); err != nil {
 			logrus.Warn("Cannot unpause container %s: %s", container.ID, err)
 		}
 	}
@@ -160,7 +161,7 @@
 
 	// Wait for exit with no timeout.
 	// Ignore returned status.
-	_ = <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning)
+	<-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning)
 
 	return nil
 }
@@ -177,5 +178,5 @@
 }
 
 func (daemon *Daemon) kill(c *containerpkg.Container, sig int) error {
-	return daemon.containerd.Signal(c.ID, sig)
+	return daemon.containerd.SignalProcess(context.Background(), c.ID, libcontainerd.InitProcessName, sig)
 }
diff --git a/daemon/list.go b/daemon/list.go
index b171ffb..c87dd6e 100644
--- a/daemon/list.go
+++ b/daemon/list.go
@@ -276,7 +276,7 @@
 	}
 
 	var taskFilter, isTask bool
-	if psFilters.Include("is-task") {
+	if psFilters.Contains("is-task") {
 		if psFilters.ExactMatch("is-task", "true") {
 			taskFilter = true
 			isTask = true
@@ -319,10 +319,10 @@
 
 	imagesFilter := map[image.ID]bool{}
 	var ancestorFilter bool
-	if psFilters.Include("ancestor") {
+	if psFilters.Contains("ancestor") {
 		ancestorFilter = true
 		psFilters.WalkValues("ancestor", func(ancestor string) error {
-			id, platform, err := daemon.GetImageIDAndPlatform(ancestor)
+			id, os, err := daemon.GetImageIDAndOS(ancestor)
 			if err != nil {
 				logrus.Warnf("Error while looking up for image %v", ancestor)
 				return nil
@@ -332,7 +332,7 @@
 				return nil
 			}
 			// Then walk down the graph and put the imageIds in imagesFilter
-			populateImageFilterByParents(imagesFilter, id, daemon.stores[platform].imageStore.Children)
+			populateImageFilterByParents(imagesFilter, id, daemon.stores[os].imageStore.Children)
 			return nil
 		})
 	}
@@ -465,7 +465,7 @@
 		return excludeContainer
 	}
 
-	if ctx.filters.Include("volume") {
+	if ctx.filters.Contains("volume") {
 		volumesByName := make(map[string]types.MountPoint)
 		for _, m := range container.Mounts {
 			if m.Name != "" {
@@ -509,7 +509,7 @@
 		networkExist = errors.New("container part of network")
 		noNetworks   = errors.New("container is not part of any networks")
 	)
-	if ctx.filters.Include("network") {
+	if ctx.filters.Contains("network") {
 		err := ctx.filters.WalkValues("network", func(value string) error {
 			if container.NetworkSettings == nil {
 				return noNetworks
@@ -566,7 +566,7 @@
 	c := s.Container
 	image := s.Image // keep the original ref if still valid (hasn't changed)
 	if image != s.ImageID {
-		id, _, err := daemon.GetImageIDAndPlatform(image)
+		id, _, err := daemon.GetImageIDAndOS(image)
 		if _, isDNE := err.(errImageDoesNotExist); err != nil && !isDNE {
 			return nil, err
 		}
@@ -585,7 +585,7 @@
 	var (
 		volumesOut []*types.Volume
 	)
-	volFilters, err := filters.FromParam(filter)
+	volFilters, err := filters.FromJSON(filter)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -627,17 +627,17 @@
 
 	var retVols []volume.Volume
 	for _, vol := range vols {
-		if filter.Include("name") {
+		if filter.Contains("name") {
 			if !filter.Match("name", vol.Name()) {
 				continue
 			}
 		}
-		if filter.Include("driver") {
+		if filter.Contains("driver") {
 			if !filter.ExactMatch("driver", vol.DriverName()) {
 				continue
 			}
 		}
-		if filter.Include("label") {
+		if filter.Contains("label") {
 			v, ok := vol.(volume.DetailedVolume)
 			if !ok {
 				continue
@@ -649,7 +649,7 @@
 		retVols = append(retVols, vol)
 	}
 	danglingOnly := false
-	if filter.Include("dangling") {
+	if filter.Contains("dangling") {
 		if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") {
 			danglingOnly = true
 		} else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") {
diff --git a/daemon/list_unix.go b/daemon/list_unix.go
index ebaae45..7b92c7c 100644
--- a/daemon/list_unix.go
+++ b/daemon/list_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris
+// +build linux freebsd
 
 package daemon
 
diff --git a/daemon/listeners/listeners_unix.go b/daemon/listeners/listeners_unix.go
index 0a4e5e4..3a7c0f8 100644
--- a/daemon/listeners/listeners_unix.go
+++ b/daemon/listeners/listeners_unix.go
@@ -1,4 +1,4 @@
-// +build !windows,!solaris
+// +build !windows
 
 package listeners
 
diff --git a/daemon/logger/adapter.go b/daemon/logger/adapter.go
index 98852e8..5817913 100644
--- a/daemon/logger/adapter.go
+++ b/daemon/logger/adapter.go
@@ -122,6 +122,9 @@
 			if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) {
 				continue
 			}
+			if !config.Until.IsZero() && msg.Timestamp.After(config.Until) {
+				return
+			}
 
 			select {
 			case watcher.Msg <- msg:
diff --git a/daemon/logger/awslogs/cloudwatchlogs.go b/daemon/logger/awslogs/cloudwatchlogs.go
index 3a7f2f6..4ea9420 100644
--- a/daemon/logger/awslogs/cloudwatchlogs.go
+++ b/daemon/logger/awslogs/cloudwatchlogs.go
@@ -14,6 +14,7 @@
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/credentials/endpointcreds"
 	"github.com/aws/aws-sdk-go/aws/ec2metadata"
 	"github.com/aws/aws-sdk-go/aws/request"
 	"github.com/aws/aws-sdk-go/aws/session"
@@ -26,16 +27,17 @@
 )
 
 const (
-	name                  = "awslogs"
-	regionKey             = "awslogs-region"
-	regionEnvKey          = "AWS_REGION"
-	logGroupKey           = "awslogs-group"
-	logStreamKey          = "awslogs-stream"
-	logCreateGroupKey     = "awslogs-create-group"
-	tagKey                = "tag"
-	datetimeFormatKey     = "awslogs-datetime-format"
-	multilinePatternKey   = "awslogs-multiline-pattern"
-	batchPublishFrequency = 5 * time.Second
+	name                   = "awslogs"
+	regionKey              = "awslogs-region"
+	regionEnvKey           = "AWS_REGION"
+	logGroupKey            = "awslogs-group"
+	logStreamKey           = "awslogs-stream"
+	logCreateGroupKey      = "awslogs-create-group"
+	tagKey                 = "tag"
+	datetimeFormatKey      = "awslogs-datetime-format"
+	multilinePatternKey    = "awslogs-multiline-pattern"
+	credentialsEndpointKey = "awslogs-credentials-endpoint"
+	batchPublishFrequency  = 5 * time.Second
 
 	// See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html
 	perEventBytes          = 26
@@ -50,6 +52,8 @@
 	invalidSequenceTokenCode  = "InvalidSequenceTokenException"
 	resourceNotFoundCode      = "ResourceNotFoundException"
 
+	credentialsEndpoint = "http://169.254.170.2"
+
 	userAgentHeader = "User-Agent"
 )
 
@@ -198,6 +202,10 @@
 	return ec2metadata.New(session.New())
 }
 
+// newSDKEndpoint is a variable such that the implementation
+// can be swapped out for unit tests.
+var newSDKEndpoint = credentialsEndpoint
+
 // newAWSLogsClient creates the service client for Amazon CloudWatch Logs.
 // Customizations to the default client from the SDK include a Docker-specific
 // User-Agent string and automatic region detection using the EC2 Instance
@@ -222,11 +230,33 @@
 		}
 		region = &r
 	}
+
+	sess, err := session.NewSession()
+	if err != nil {
+		return nil, errors.New("Failed to create a service client session for for awslogs driver")
+	}
+
+	// attach region to cloudwatchlogs config
+	sess.Config.Region = region
+
+	if uri, ok := info.Config[credentialsEndpointKey]; ok {
+		logrus.Debugf("Trying to get credentials from awslogs-credentials-endpoint")
+
+		endpoint := fmt.Sprintf("%s%s", newSDKEndpoint, uri)
+		creds := endpointcreds.NewCredentialsClient(*sess.Config, sess.Handlers, endpoint,
+			func(p *endpointcreds.Provider) {
+				p.ExpiryWindow = 5 * time.Minute
+			})
+
+		// attach credentials to cloudwatchlogs config
+		sess.Config.Credentials = creds
+	}
+
 	logrus.WithFields(logrus.Fields{
 		"region": *region,
 	}).Debug("Created awslogs client")
 
-	client := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(*region))
+	client := cloudwatchlogs.New(sess)
 
 	client.Handlers.Build.PushBackNamed(request.NamedHandler{
 		Name: "DockerUserAgentHandler",
@@ -245,6 +275,10 @@
 	return name
 }
 
+func (l *logStream) BufSize() int {
+	return maximumBytesPerEvent
+}
+
 // Log submits messages for logging by an instance of the awslogs logging driver
 func (l *logStream) Log(msg *logger.Message) error {
 	l.lock.RLock()
@@ -525,6 +559,7 @@
 		case tagKey:
 		case datetimeFormatKey:
 		case multilinePatternKey:
+		case credentialsEndpointKey:
 		default:
 			return fmt.Errorf("unknown log opt '%s' for %s log driver", key, name)
 		}
diff --git a/daemon/logger/awslogs/cloudwatchlogs_test.go b/daemon/logger/awslogs/cloudwatchlogs_test.go
index 989eb6f..7ebc5de 100644
--- a/daemon/logger/awslogs/cloudwatchlogs_test.go
+++ b/daemon/logger/awslogs/cloudwatchlogs_test.go
@@ -3,7 +3,10 @@
 import (
 	"errors"
 	"fmt"
+	"io/ioutil"
 	"net/http"
+	"net/http/httptest"
+	"os"
 	"reflect"
 	"regexp"
 	"runtime"
@@ -1049,6 +1052,11 @@
 	}
 }
 
+func TestIsSizedLogger(t *testing.T) {
+	awslogs := &logStream{}
+	assert.Implements(t, (*logger.SizedLogger)(nil), awslogs, "awslogs should implement SizedLogger")
+}
+
 func BenchmarkUnwrapEvents(b *testing.B) {
 	events := make([]wrappedEvent, maximumLogEventsPerPut)
 	for i := 0; i < maximumLogEventsPerPut; i++ {
@@ -1065,3 +1073,121 @@
 		as.Len(res, maximumLogEventsPerPut)
 	}
 }
+
+func TestNewAWSLogsClientCredentialEndpointDetect(t *testing.T) {
+	// required for the cloudwatchlogs client
+	os.Setenv("AWS_REGION", "us-west-2")
+	defer os.Unsetenv("AWS_REGION")
+
+	credsResp := `{
+		"AccessKeyId" :    "test-access-key-id",
+		"SecretAccessKey": "test-secret-access-key"
+		}`
+
+	expectedAccessKeyID := "test-access-key-id"
+	expectedSecretAccessKey := "test-secret-access-key"
+
+	testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		w.Header().Set("Content-Type", "application/json")
+		fmt.Fprintln(w, credsResp)
+	}))
+	defer testServer.Close()
+
+	// set the SDKEndpoint in the driver
+	newSDKEndpoint = testServer.URL
+
+	info := logger.Info{
+		Config: map[string]string{},
+	}
+
+	info.Config["awslogs-credentials-endpoint"] = "/creds"
+
+	c, err := newAWSLogsClient(info)
+	assert.NoError(t, err)
+
+	client := c.(*cloudwatchlogs.CloudWatchLogs)
+
+	creds, err := client.Config.Credentials.Get()
+	assert.NoError(t, err)
+
+	assert.Equal(t, expectedAccessKeyID, creds.AccessKeyID)
+	assert.Equal(t, expectedSecretAccessKey, creds.SecretAccessKey)
+}
+
+func TestNewAWSLogsClientCredentialEnvironmentVariable(t *testing.T) {
+	// required for the cloudwatchlogs client
+	os.Setenv("AWS_REGION", "us-west-2")
+	defer os.Unsetenv("AWS_REGION")
+
+	expectedAccessKeyID := "test-access-key-id"
+	expectedSecretAccessKey := "test-secret-access-key"
+
+	os.Setenv("AWS_ACCESS_KEY_ID", expectedAccessKeyID)
+	defer os.Unsetenv("AWS_ACCESS_KEY_ID")
+
+	os.Setenv("AWS_SECRET_ACCESS_KEY", expectedSecretAccessKey)
+	defer os.Unsetenv("AWS_SECRET_ACCESS_KEY")
+
+	info := logger.Info{
+		Config: map[string]string{},
+	}
+
+	c, err := newAWSLogsClient(info)
+	assert.NoError(t, err)
+
+	client := c.(*cloudwatchlogs.CloudWatchLogs)
+
+	creds, err := client.Config.Credentials.Get()
+	assert.NoError(t, err)
+
+	assert.Equal(t, expectedAccessKeyID, creds.AccessKeyID)
+	assert.Equal(t, expectedSecretAccessKey, creds.SecretAccessKey)
+
+}
+
+func TestNewAWSLogsClientCredentialSharedFile(t *testing.T) {
+	// required for the cloudwatchlogs client
+	os.Setenv("AWS_REGION", "us-west-2")
+	defer os.Unsetenv("AWS_REGION")
+
+	expectedAccessKeyID := "test-access-key-id"
+	expectedSecretAccessKey := "test-secret-access-key"
+
+	contentStr := `
+	[default]
+	aws_access_key_id = "test-access-key-id"
+	aws_secret_access_key =  "test-secret-access-key"
+	`
+	content := []byte(contentStr)
+
+	tmpfile, err := ioutil.TempFile("", "example")
+	defer os.Remove(tmpfile.Name()) // clean up
+	assert.NoError(t, err)
+
+	_, err = tmpfile.Write(content)
+	assert.NoError(t, err)
+
+	err = tmpfile.Close()
+	assert.NoError(t, err)
+
+	os.Unsetenv("AWS_ACCESS_KEY_ID")
+	os.Unsetenv("AWS_SECRET_ACCESS_KEY")
+
+	os.Setenv("AWS_SHARED_CREDENTIALS_FILE", tmpfile.Name())
+	defer os.Unsetenv("AWS_SHARED_CREDENTIALS_FILE")
+
+	info := logger.Info{
+		Config: map[string]string{},
+	}
+
+	c, err := newAWSLogsClient(info)
+	assert.NoError(t, err)
+
+	client := c.(*cloudwatchlogs.CloudWatchLogs)
+
+	creds, err := client.Config.Credentials.Get()
+	assert.NoError(t, err)
+
+	assert.Equal(t, expectedAccessKeyID, creds.AccessKeyID)
+	assert.Equal(t, expectedSecretAccessKey, creds.SecretAccessKey)
+}
diff --git a/daemon/logger/copier.go b/daemon/logger/copier.go
index c773fc6..a1d4f06 100644
--- a/daemon/logger/copier.go
+++ b/daemon/logger/copier.go
@@ -10,8 +10,13 @@
 )
 
 const (
-	bufSize  = 16 * 1024
+	// readSize is the maximum bytes read during a single read
+	// operation.
 	readSize = 2 * 1024
+
+	// defaultBufSize provides a reasonable default for loggers that do
+	// not have an external limit to impose on log line size.
+	defaultBufSize = 16 * 1024
 )
 
 // Copier can copy logs from specified sources to Logger and attach Timestamp.
@@ -44,7 +49,13 @@
 
 func (c *Copier) copySrc(name string, src io.Reader) {
 	defer c.copyJobs.Done()
+
+	bufSize := defaultBufSize
+	if sizedLogger, ok := c.dst.(SizedLogger); ok {
+		bufSize = sizedLogger.BufSize()
+	}
 	buf := make([]byte, bufSize)
+
 	n := 0
 	eof := false
 
diff --git a/daemon/logger/copier_test.go b/daemon/logger/copier_test.go
index 4210022..a911a70 100644
--- a/daemon/logger/copier_test.go
+++ b/daemon/logger/copier_test.go
@@ -31,6 +31,25 @@
 
 func (l *TestLoggerJSON) Name() string { return "json" }
 
+type TestSizedLoggerJSON struct {
+	*json.Encoder
+	mu sync.Mutex
+}
+
+func (l *TestSizedLoggerJSON) Log(m *Message) error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	return l.Encode(m)
+}
+
+func (*TestSizedLoggerJSON) Close() error { return nil }
+
+func (*TestSizedLoggerJSON) Name() string { return "sized-json" }
+
+func (*TestSizedLoggerJSON) BufSize() int {
+	return 32 * 1024
+}
+
 func TestCopier(t *testing.T) {
 	stdoutLine := "Line that thinks that it is log line from docker stdout"
 	stderrLine := "Line that thinks that it is log line from docker stderr"
@@ -104,10 +123,9 @@
 
 // TestCopierLongLines tests long lines without line breaks
 func TestCopierLongLines(t *testing.T) {
-	// Long lines (should be split at "bufSize")
-	const bufSize = 16 * 1024
-	stdoutLongLine := strings.Repeat("a", bufSize)
-	stderrLongLine := strings.Repeat("b", bufSize)
+	// Long lines (should be split at "defaultBufSize")
+	stdoutLongLine := strings.Repeat("a", defaultBufSize)
+	stderrLongLine := strings.Repeat("b", defaultBufSize)
 	stdoutTrailingLine := "stdout trailing line"
 	stderrTrailingLine := "stderr trailing line"
 
@@ -205,6 +223,41 @@
 	}
 }
 
+func TestCopierWithSized(t *testing.T) {
+	var jsonBuf bytes.Buffer
+	expectedMsgs := 2
+	sizedLogger := &TestSizedLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)}
+	logbuf := bytes.NewBufferString(strings.Repeat(".", sizedLogger.BufSize()*expectedMsgs))
+	c := NewCopier(map[string]io.Reader{"stdout": logbuf}, sizedLogger)
+
+	c.Run()
+	// Wait for Copier to finish writing to the buffered logger.
+	c.Wait()
+	c.Close()
+
+	recvdMsgs := 0
+	dec := json.NewDecoder(&jsonBuf)
+	for {
+		var msg Message
+		if err := dec.Decode(&msg); err != nil {
+			if err == io.EOF {
+				break
+			}
+			t.Fatal(err)
+		}
+		if msg.Source != "stdout" {
+			t.Fatalf("Wrong Source: %q, should be %q", msg.Source, "stdout")
+		}
+		if len(msg.Line) != sizedLogger.BufSize() {
+			t.Fatalf("Line was not of expected max length %d, was %d", sizedLogger.BufSize(), len(msg.Line))
+		}
+		recvdMsgs++
+	}
+	if recvdMsgs != expectedMsgs {
+		t.Fatalf("expected to receive %d messages, actually received %d", expectedMsgs, recvdMsgs)
+	}
+}
+
 type BenchmarkLoggerDummy struct {
 }
 
diff --git a/daemon/logger/factory.go b/daemon/logger/factory.go
index 3200159..2ef3bf8 100644
--- a/daemon/logger/factory.go
+++ b/daemon/logger/factory.go
@@ -93,7 +93,7 @@
 	lf.m.Lock()
 	defer lf.m.Unlock()
 
-	c, _ := lf.optValidator[name]
+	c := lf.optValidator[name]
 	return c
 }
 
diff --git a/daemon/logger/gelf/gelf.go b/daemon/logger/gelf/gelf.go
index de209ce..d902c0c 100644
--- a/daemon/logger/gelf/gelf.go
+++ b/daemon/logger/gelf/gelf.go
@@ -23,7 +23,7 @@
 const name = "gelf"
 
 type gelfLogger struct {
-	writer   *gelf.Writer
+	writer   gelf.Writer
 	info     logger.Info
 	hostname string
 	rawExtra json.RawMessage
@@ -89,8 +89,56 @@
 		return nil, err
 	}
 
-	// create new gelfWriter
-	gelfWriter, err := gelf.NewWriter(address)
+	var gelfWriter gelf.Writer
+	if address.Scheme == "udp" {
+		gelfWriter, err = newGELFUDPWriter(address.Host, info)
+		if err != nil {
+			return nil, err
+		}
+	} else if address.Scheme == "tcp" {
+		gelfWriter, err = newGELFTCPWriter(address.Host, info)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return &gelfLogger{
+		writer:   gelfWriter,
+		info:     info,
+		hostname: hostname,
+		rawExtra: rawExtra,
+	}, nil
+}
+
+// create new TCP gelfWriter
+func newGELFTCPWriter(address string, info logger.Info) (gelf.Writer, error) {
+	gelfWriter, err := gelf.NewTCPWriter(address)
+	if err != nil {
+		return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err)
+	}
+
+	if v, ok := info.Config["gelf-tcp-max-reconnect"]; ok {
+		i, err := strconv.Atoi(v)
+		if err != nil || i < 0 {
+			return nil, fmt.Errorf("gelf-tcp-max-reconnect must be a non-negative integer")
+		}
+		gelfWriter.MaxReconnect = i
+	}
+
+	if v, ok := info.Config["gelf-tcp-reconnect-delay"]; ok {
+		i, err := strconv.Atoi(v)
+		if err != nil || i < 0 {
+			return nil, fmt.Errorf("gelf-tcp-reconnect-delay must be a non-negative integer")
+		}
+		gelfWriter.ReconnectDelay = time.Duration(i)
+	}
+
+	return gelfWriter, nil
+}
+
+// create new UDP gelfWriter
+func newGELFUDPWriter(address string, info logger.Info) (gelf.Writer, error) {
+	gelfWriter, err := gelf.NewUDPWriter(address)
 	if err != nil {
 		return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err)
 	}
@@ -116,12 +164,7 @@
 		gelfWriter.CompressionLevel = val
 	}
 
-	return &gelfLogger{
-		writer:   gelfWriter,
-		info:     info,
-		hostname: hostname,
-		rawExtra: rawExtra,
-	}, nil
+	return gelfWriter, nil
 }
 
 func (s *gelfLogger) Log(msg *logger.Message) error {
@@ -135,7 +178,7 @@
 		Host:     s.hostname,
 		Short:    string(msg.Line),
 		TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0,
-		Level:    level,
+		Level:    int32(level),
 		RawExtra: s.rawExtra,
 	}
 	logger.PutMessage(msg)
@@ -156,6 +199,11 @@
 
 // ValidateLogOpt looks for gelf specific log option gelf-address.
 func ValidateLogOpt(cfg map[string]string) error {
+	address, err := parseAddress(cfg["gelf-address"])
+	if err != nil {
+		return err
+	}
+
 	for key, val := range cfg {
 		switch key {
 		case "gelf-address":
@@ -164,46 +212,59 @@
 		case "env":
 		case "env-regex":
 		case "gelf-compression-level":
+			if address.Scheme != "udp" {
+				return fmt.Errorf("compression is only supported on UDP")
+			}
 			i, err := strconv.Atoi(val)
 			if err != nil || i < flate.DefaultCompression || i > flate.BestCompression {
 				return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key)
 			}
 		case "gelf-compression-type":
+			if address.Scheme != "udp" {
+				return fmt.Errorf("compression is only supported on UDP")
+			}
 			switch val {
 			case "gzip", "zlib", "none":
 			default:
 				return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key)
 			}
+		case "gelf-tcp-max-reconnect", "gelf-tcp-reconnect-delay":
+			if address.Scheme != "tcp" {
+				return fmt.Errorf("%q is only valid for TCP", key)
+			}
+			i, err := strconv.Atoi(val)
+			if err != nil || i < 0 {
+				return fmt.Errorf("%q must be a non-negative integer", key)
+			}
 		default:
 			return fmt.Errorf("unknown log opt %q for gelf log driver", key)
 		}
 	}
 
-	_, err := parseAddress(cfg["gelf-address"])
-	return err
+	return nil
 }
 
-func parseAddress(address string) (string, error) {
+func parseAddress(address string) (*url.URL, error) {
 	if address == "" {
-		return "", nil
+		return nil, fmt.Errorf("gelf-address is a required parameter")
 	}
 	if !urlutil.IsTransportURL(address) {
-		return "", fmt.Errorf("gelf-address should be in form proto://address, got %v", address)
+		return nil, fmt.Errorf("gelf-address should be in form proto://address, got %v", address)
 	}
 	url, err := url.Parse(address)
 	if err != nil {
-		return "", err
+		return nil, err
 	}
 
 	// we support only udp
-	if url.Scheme != "udp" {
-		return "", fmt.Errorf("gelf: endpoint needs to be UDP")
+	if url.Scheme != "udp" && url.Scheme != "tcp" {
+		return nil, fmt.Errorf("gelf: endpoint needs to be TCP or UDP")
 	}
 
 	// get host and port
 	if _, _, err = net.SplitHostPort(url.Host); err != nil {
-		return "", fmt.Errorf("gelf: please provide gelf-address as udp://host:port")
+		return nil, fmt.Errorf("gelf: please provide gelf-address as proto://host:port")
 	}
 
-	return url.Host, nil
+	return url, nil
 }
diff --git a/daemon/logger/gelf/gelf_test.go b/daemon/logger/gelf/gelf_test.go
new file mode 100644
index 0000000..087c531
--- /dev/null
+++ b/daemon/logger/gelf/gelf_test.go
@@ -0,0 +1,260 @@
+// +build linux
+
+package gelf
+
+import (
+	"net"
+	"testing"
+
+	"github.com/docker/docker/daemon/logger"
+)
+
+// Validate parseAddress
+func TestParseAddress(t *testing.T) {
+	url, err := parseAddress("udp://127.0.0.1:12201")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if url.String() != "udp://127.0.0.1:12201" {
+		t.Fatalf("Expected address udp://127.0.0.1:12201, got %s", url.String())
+	}
+
+	_, err = parseAddress("127.0.0.1:12201")
+	if err == nil {
+		t.Fatal("Expected error requiring protocol")
+	}
+
+	_, err = parseAddress("http://127.0.0.1:12201")
+	if err == nil {
+		t.Fatal("Expected error restricting protocol")
+	}
+}
+
+// Validate TCP options
+func TestTCPValidateLogOpt(t *testing.T) {
+	err := ValidateLogOpt(map[string]string{
+		"gelf-address": "tcp://127.0.0.1:12201",
+	})
+	if err != nil {
+		t.Fatal("Expected TCP to be supported")
+	}
+
+	err = ValidateLogOpt(map[string]string{
+		"gelf-address":           "tcp://127.0.0.1:12201",
+		"gelf-compression-level": "9",
+	})
+	if err == nil {
+		t.Fatal("Expected TCP to reject compression level")
+	}
+
+	err = ValidateLogOpt(map[string]string{
+		"gelf-address":          "tcp://127.0.0.1:12201",
+		"gelf-compression-type": "gzip",
+	})
+	if err == nil {
+		t.Fatal("Expected TCP to reject compression type")
+	}
+
+	err = ValidateLogOpt(map[string]string{
+		"gelf-address":             "tcp://127.0.0.1:12201",
+		"gelf-tcp-max-reconnect":   "5",
+		"gelf-tcp-reconnect-delay": "10",
+	})
+	if err != nil {
+		t.Fatal("Expected TCP reconnect parameters to be valid")
+	}
+
+	err = ValidateLogOpt(map[string]string{
+		"gelf-address":             "tcp://127.0.0.1:12201",
+		"gelf-tcp-max-reconnect":   "-1",
+		"gelf-tcp-reconnect-delay": "-3",
+	})
+	if err == nil {
+		t.Fatal("Expected negative TCP reconnect to be rejected")
+	}
+
+	err = ValidateLogOpt(map[string]string{
+		"gelf-address":             "tcp://127.0.0.1:12201",
+		"gelf-tcp-max-reconnect":   "invalid",
+		"gelf-tcp-reconnect-delay": "invalid",
+	})
+	if err == nil {
+		t.Fatal("Expected TCP reconnect to be required to be an int")
+	}
+
+	err = ValidateLogOpt(map[string]string{
+		"gelf-address":             "udp://127.0.0.1:12201",
+		"gelf-tcp-max-reconnect":   "1",
+		"gelf-tcp-reconnect-delay": "3",
+	})
+	if err == nil {
+		t.Fatal("Expected TCP reconnect to be invalid for UDP")
+	}
+}
+
+// Validate UDP options
+func TestUDPValidateLogOpt(t *testing.T) {
+	err := ValidateLogOpt(map[string]string{
+		"gelf-address":           "udp://127.0.0.1:12201",
+		"tag":                    "testtag",
+		"labels":                 "testlabel",
+		"env":                    "testenv",
+		"env-regex":              "testenv-regex",
+		"gelf-compression-level": "9",
+		"gelf-compression-type":  "gzip",
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = ValidateLogOpt(map[string]string{
+		"gelf-address":           "udp://127.0.0.1:12201",
+		"gelf-compression-level": "ultra",
+		"gelf-compression-type":  "zlib",
+	})
+	if err == nil {
+		t.Fatal("Expected compression level error")
+	}
+
+	err = ValidateLogOpt(map[string]string{
+		"gelf-address":          "udp://127.0.0.1:12201",
+		"gelf-compression-type": "rar",
+	})
+	if err == nil {
+		t.Fatal("Expected compression type error")
+	}
+
+	err = ValidateLogOpt(map[string]string{
+		"invalid": "invalid",
+	})
+	if err == nil {
+		t.Fatal("Expected unknown option error")
+	}
+
+	err = ValidateLogOpt(map[string]string{})
+	if err == nil {
+		t.Fatal("Expected required parameter error")
+	}
+}
+
+// Validate newGELFTCPWriter
+func TestNewGELFTCPWriter(t *testing.T) {
+	address := "127.0.0.1:0"
+	tcpAddr, err := net.ResolveTCPAddr("tcp", address)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	listener, err := net.ListenTCP("tcp", tcpAddr)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	url := "tcp://" + listener.Addr().String()
+	info := logger.Info{
+		Config: map[string]string{
+			"gelf-address":             url,
+			"gelf-tcp-max-reconnect":   "0",
+			"gelf-tcp-reconnect-delay": "0",
+			"tag": "{{.ID}}",
+		},
+		ContainerID: "12345678901234567890",
+	}
+
+	writer, err := newGELFTCPWriter(listener.Addr().String(), info)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = writer.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = listener.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Validate newGELFUDPWriter
+func TestNewGELFUDPWriter(t *testing.T) {
+	address := "127.0.0.1:0"
+	info := logger.Info{
+		Config: map[string]string{
+			"gelf-address":           "udp://127.0.0.1:0",
+			"gelf-compression-level": "5",
+			"gelf-compression-type":  "gzip",
+		},
+	}
+
+	writer, err := newGELFUDPWriter(address, info)
+	if err != nil {
+		t.Fatal(err)
+	}
+	err = writer.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Validate New for TCP
+func TestNewTCP(t *testing.T) {
+	address := "127.0.0.1:0"
+	tcpAddr, err := net.ResolveTCPAddr("tcp", address)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	listener, err := net.ListenTCP("tcp", tcpAddr)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	url := "tcp://" + listener.Addr().String()
+	info := logger.Info{
+		Config: map[string]string{
+			"gelf-address":             url,
+			"gelf-tcp-max-reconnect":   "0",
+			"gelf-tcp-reconnect-delay": "0",
+		},
+		ContainerID: "12345678901234567890",
+	}
+
+	logger, err := New(info)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = logger.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = listener.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
+
+// Validate New for UDP
+func TestNewUDP(t *testing.T) {
+	info := logger.Info{
+		Config: map[string]string{
+			"gelf-address":           "udp://127.0.0.1:0",
+			"gelf-compression-level": "5",
+			"gelf-compression-type":  "gzip",
+		},
+		ContainerID: "12345678901234567890",
+	}
+
+	logger, err := New(info)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = logger.Close()
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/daemon/logger/gelf/gelf_unsupported.go b/daemon/logger/gelf/gelf_unsupported.go
deleted file mode 100644
index 266f73b..0000000
--- a/daemon/logger/gelf/gelf_unsupported.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// +build !linux
-
-package gelf
diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go
index 4d9b999..6aff21f 100644
--- a/daemon/logger/journald/read.go
+++ b/daemon/logger/journald/read.go
@@ -171,13 +171,15 @@
 	return nil
 }
 
-func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor *C.char) *C.char {
+func (s *journald) drainJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, oldCursor *C.char, untilUnixMicro uint64) (*C.char, bool) {
 	var msg, data, cursor *C.char
 	var length C.size_t
 	var stamp C.uint64_t
 	var priority, partial C.int
+	var done bool
 
-	// Walk the journal from here forward until we run out of new entries.
+	// Walk the journal from here forward until we run out of new entries
+	// or we reach the until value (if provided).
 drain:
 	for {
 		// Try not to send a given entry twice.
@@ -195,6 +197,12 @@
 			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
 				break
 			}
+			// Break if the timestamp exceeds any provided until flag.
+			if untilUnixMicro != 0 && untilUnixMicro < uint64(stamp) {
+				done = true
+				break
+			}
+
 			// Set up the time and text of the entry.
 			timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000)
 			line := C.GoBytes(unsafe.Pointer(msg), C.int(length))
@@ -240,10 +248,10 @@
 		// ensure that we won't be freeing an address that's invalid
 		cursor = nil
 	}
-	return cursor
+	return cursor, done
 }
 
-func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor *C.char) *C.char {
+func (s *journald) followJournal(logWatcher *logger.LogWatcher, j *C.sd_journal, pfd [2]C.int, cursor *C.char, untilUnixMicro uint64) *C.char {
 	s.mu.Lock()
 	s.readers.readers[logWatcher] = logWatcher
 	if s.closed {
@@ -270,9 +278,10 @@
 				break
 			}
 
-			cursor = s.drainJournal(logWatcher, config, j, cursor)
+			var done bool
+			cursor, done = s.drainJournal(logWatcher, j, cursor, untilUnixMicro)
 
-			if status != 1 {
+			if status != 1 || done {
 				// We were notified to stop
 				break
 			}
@@ -304,6 +313,7 @@
 	var cmatch, cursor *C.char
 	var stamp C.uint64_t
 	var sinceUnixMicro uint64
+	var untilUnixMicro uint64
 	var pipes [2]C.int
 
 	// Get a handle to the journal.
@@ -343,10 +353,19 @@
 		nano := config.Since.UnixNano()
 		sinceUnixMicro = uint64(nano / 1000)
 	}
+	// If we have an until value, convert it too
+	if !config.Until.IsZero() {
+		nano := config.Until.UnixNano()
+		untilUnixMicro = uint64(nano / 1000)
+	}
 	if config.Tail > 0 {
 		lines := config.Tail
-		// Start at the end of the journal.
-		if C.sd_journal_seek_tail(j) < 0 {
+		// If until time provided, start from there.
+		// Otherwise start at the end of the journal.
+		if untilUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(untilUnixMicro)) < 0 {
+			logWatcher.Err <- fmt.Errorf("error seeking provided until value")
+			return
+		} else if C.sd_journal_seek_tail(j) < 0 {
 			logWatcher.Err <- fmt.Errorf("error seeking to end of journal")
 			return
 		}
@@ -362,8 +381,7 @@
 			if C.sd_journal_get_realtime_usec(j, &stamp) != 0 {
 				break
 			} else {
-				// Compare the timestamp on the entry
-				// to our threshold value.
+				// Compare the timestamp on the entry to our threshold value.
 				if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) {
 					break
 				}
@@ -392,7 +410,7 @@
 			return
 		}
 	}
-	cursor = s.drainJournal(logWatcher, config, j, nil)
+	cursor, _ = s.drainJournal(logWatcher, j, nil, untilUnixMicro)
 	if config.Follow {
 		// Allocate a descriptor for following the journal, if we'll
 		// need one.  Do it here so that we can report if it fails.
@@ -404,7 +422,7 @@
 			if C.pipe(&pipes[0]) == C.int(-1) {
 				logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe")
 			} else {
-				cursor = s.followJournal(logWatcher, config, j, pipes, cursor)
+				cursor = s.followJournal(logWatcher, j, pipes, cursor, untilUnixMicro)
 				// Let followJournal handle freeing the journal context
 				// object and closing the channel.
 				following = true
diff --git a/daemon/logger/jsonfilelog/jsonfilelog.go b/daemon/logger/jsonfilelog/jsonfilelog.go
index c5f12d0..7aa92f3 100644
--- a/daemon/logger/jsonfilelog/jsonfilelog.go
+++ b/daemon/logger/jsonfilelog/jsonfilelog.go
@@ -7,14 +7,13 @@
 	"bytes"
 	"encoding/json"
 	"fmt"
-	"io"
 	"strconv"
 	"sync"
 
 	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog"
 	"github.com/docker/docker/daemon/logger/loggerutils"
-	"github.com/docker/docker/pkg/jsonlog"
-	"github.com/docker/go-units"
+	units "github.com/docker/go-units"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -24,12 +23,9 @@
 
 // JSONFileLogger is Logger implementation for default Docker logging.
 type JSONFileLogger struct {
-	extra []byte // json-encoded extra attributes
-
-	mu      sync.RWMutex
-	buf     *bytes.Buffer // avoids allocating a new buffer on each call to `Log()`
+	mu      sync.Mutex
 	closed  bool
-	writer  *loggerutils.RotateFileWriter
+	writer  *loggerutils.LogFile
 	readers map[*logger.LogWatcher]struct{} // stores the active log followers
 }
 
@@ -65,11 +61,6 @@
 		}
 	}
 
-	writer, err := loggerutils.NewRotateFileWriter(info.LogPath, capval, maxFiles)
-	if err != nil {
-		return nil, err
-	}
-
 	var extra []byte
 	attrs, err := info.ExtraAttributes(nil)
 	if err != nil {
@@ -83,48 +74,44 @@
 		}
 	}
 
+	buf := bytes.NewBuffer(nil)
+	marshalFunc := func(msg *logger.Message) ([]byte, error) {
+		if err := marshalMessage(msg, extra, buf); err != nil {
+			return nil, err
+		}
+		b := buf.Bytes()
+		buf.Reset()
+		return b, nil
+	}
+
+	writer, err := loggerutils.NewLogFile(info.LogPath, capval, maxFiles, marshalFunc, decodeFunc)
+	if err != nil {
+		return nil, err
+	}
+
 	return &JSONFileLogger{
-		buf:     bytes.NewBuffer(nil),
 		writer:  writer,
 		readers: make(map[*logger.LogWatcher]struct{}),
-		extra:   extra,
 	}, nil
 }
 
 // Log converts logger.Message to jsonlog.JSONLog and serializes it to file.
 func (l *JSONFileLogger) Log(msg *logger.Message) error {
 	l.mu.Lock()
-	err := writeMessageBuf(l.writer, msg, l.extra, l.buf)
-	l.buf.Reset()
+	err := l.writer.WriteLogEntry(msg)
 	l.mu.Unlock()
 	return err
 }
 
-func writeMessageBuf(w io.Writer, m *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error {
-	if err := marshalMessage(m, extra, buf); err != nil {
-		logger.PutMessage(m)
-		return err
-	}
-	logger.PutMessage(m)
-	if _, err := w.Write(buf.Bytes()); err != nil {
-		return errors.Wrap(err, "error writing log entry")
-	}
-	return nil
-}
-
 func marshalMessage(msg *logger.Message, extra json.RawMessage, buf *bytes.Buffer) error {
-	timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp)
-	if err != nil {
-		return err
-	}
 	logLine := msg.Line
 	if !msg.Partial {
 		logLine = append(msg.Line, '\n')
 	}
-	err = (&jsonlog.JSONLogs{
+	err := (&jsonlog.JSONLogs{
 		Log:      logLine,
 		Stream:   msg.Source,
-		Created:  timestamp,
+		Created:  msg.Timestamp,
 		RawAttrs: extra,
 	}).MarshalJSONBuf(buf)
 	if err != nil {
diff --git a/daemon/logger/jsonfilelog/jsonfilelog_test.go b/daemon/logger/jsonfilelog/jsonfilelog_test.go
index d2d36e9..893c054 100644
--- a/daemon/logger/jsonfilelog/jsonfilelog_test.go
+++ b/daemon/logger/jsonfilelog/jsonfilelog_test.go
@@ -1,6 +1,7 @@
 package jsonfilelog
 
 import (
+	"bytes"
 	"encoding/json"
 	"io/ioutil"
 	"os"
@@ -11,7 +12,9 @@
 	"time"
 
 	"github.com/docker/docker/daemon/logger"
-	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog"
+	"github.com/gotestyourself/gotestyourself/fs"
+	"github.com/stretchr/testify/require"
 )
 
 func TestJSONFileLogger(t *testing.T) {
@@ -54,36 +57,38 @@
 	}
 }
 
-func BenchmarkJSONFileLogger(b *testing.B) {
-	cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
-	tmp, err := ioutil.TempDir("", "docker-logger-")
-	if err != nil {
-		b.Fatal(err)
-	}
-	defer os.RemoveAll(tmp)
-	filename := filepath.Join(tmp, "container.log")
-	l, err := New(logger.Info{
-		ContainerID: cid,
-		LogPath:     filename,
-	})
-	if err != nil {
-		b.Fatal(err)
-	}
-	defer l.Close()
+func BenchmarkJSONFileLoggerLog(b *testing.B) {
+	tmp := fs.NewDir(b, "bench-jsonfilelog")
+	defer tmp.Remove()
 
-	testLine := "Line that thinks that it is log line from docker\n"
-	msg := &logger.Message{Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()}
-	jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON()
-	if err != nil {
-		b.Fatal(err)
+	jsonlogger, err := New(logger.Info{
+		ContainerID: "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657",
+		LogPath:     tmp.Join("container.log"),
+		Config: map[string]string{
+			"labels": "first,second",
+		},
+		ContainerLabels: map[string]string{
+			"first":  "label_value",
+			"second": "label_foo",
+		},
+	})
+	require.NoError(b, err)
+	defer jsonlogger.Close()
+
+	msg := &logger.Message{
+		Line:      []byte("Line that thinks that it is log line from docker\n"),
+		Source:    "stderr",
+		Timestamp: time.Now().UTC(),
 	}
-	b.SetBytes(int64(len(jsonlog)+1) * 30)
+
+	buf := bytes.NewBuffer(nil)
+	require.NoError(b, marshalMessage(msg, nil, buf))
+	b.SetBytes(int64(buf.Len()))
+
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		for j := 0; j < 30; j++ {
-			if err := l.Log(msg); err != nil {
-				b.Fatal(err)
-			}
+		if err := jsonlogger.Log(msg); err != nil {
+			b.Fatal(err)
 		}
 	}
 }
@@ -200,50 +205,3 @@
 		t.Fatalf("Wrong log attrs: %q, expected %q", extra, expected)
 	}
 }
-
-func BenchmarkJSONFileLoggerWithReader(b *testing.B) {
-	b.StopTimer()
-	b.ResetTimer()
-	cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657"
-	dir, err := ioutil.TempDir("", "json-logger-bench")
-	if err != nil {
-		b.Fatal(err)
-	}
-	defer os.RemoveAll(dir)
-
-	l, err := New(logger.Info{
-		ContainerID: cid,
-		LogPath:     filepath.Join(dir, "container.log"),
-	})
-	if err != nil {
-		b.Fatal(err)
-	}
-	defer l.Close()
-	msg := &logger.Message{Line: []byte("line"), Source: "src1"}
-	jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON()
-	if err != nil {
-		b.Fatal(err)
-	}
-	b.SetBytes(int64(len(jsonlog)+1) * 30)
-
-	b.StartTimer()
-
-	go func() {
-		for i := 0; i < b.N; i++ {
-			for j := 0; j < 30; j++ {
-				l.Log(msg)
-			}
-		}
-		l.Close()
-	}()
-
-	lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Follow: true})
-	watchClose := lw.WatchClose()
-	for {
-		select {
-		case <-lw.Msg:
-		case <-watchClose:
-			return
-		}
-	}
-}
diff --git a/daemon/logger/jsonfilelog/jsonlog/jsonlog.go b/daemon/logger/jsonfilelog/jsonlog/jsonlog.go
new file mode 100644
index 0000000..549e355
--- /dev/null
+++ b/daemon/logger/jsonfilelog/jsonlog/jsonlog.go
@@ -0,0 +1,25 @@
+package jsonlog
+
+import (
+	"time"
+)
+
+// JSONLog is a log message, typically a single entry from a given log stream.
+type JSONLog struct {
+	// Log is the log message
+	Log string `json:"log,omitempty"`
+	// Stream is the log source
+	Stream string `json:"stream,omitempty"`
+	// Created is the created timestamp of log
+	Created time.Time `json:"time"`
+	// Attrs is the list of extra attributes provided by the user
+	Attrs map[string]string `json:"attrs,omitempty"`
+}
+
+// Reset all fields to their zero value.
+func (jl *JSONLog) Reset() {
+	jl.Log = ""
+	jl.Stream = ""
+	jl.Created = time.Time{}
+	jl.Attrs = make(map[string]string)
+}
diff --git a/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go b/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go
new file mode 100644
index 0000000..37604ae
--- /dev/null
+++ b/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes.go
@@ -0,0 +1,125 @@
+package jsonlog
+
+import (
+	"bytes"
+	"encoding/json"
+	"time"
+	"unicode/utf8"
+)
+
+// JSONLogs marshals encoded JSONLog objects
+type JSONLogs struct {
+	Log     []byte    `json:"log,omitempty"`
+	Stream  string    `json:"stream,omitempty"`
+	Created time.Time `json:"time"`
+
+	// json-encoded bytes
+	RawAttrs json.RawMessage `json:"attrs,omitempty"`
+}
+
+// MarshalJSONBuf is an optimized JSON marshaller that avoids reflection
+// and unnecessary allocation.
+func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error {
+	var first = true
+
+	buf.WriteString(`{`)
+	if len(mj.Log) != 0 {
+		first = false
+		buf.WriteString(`"log":`)
+		ffjsonWriteJSONBytesAsString(buf, mj.Log)
+	}
+	if len(mj.Stream) != 0 {
+		if first {
+			first = false
+		} else {
+			buf.WriteString(`,`)
+		}
+		buf.WriteString(`"stream":`)
+		ffjsonWriteJSONBytesAsString(buf, []byte(mj.Stream))
+	}
+	if len(mj.RawAttrs) > 0 {
+		if first {
+			first = false
+		} else {
+			buf.WriteString(`,`)
+		}
+		buf.WriteString(`"attrs":`)
+		buf.Write(mj.RawAttrs)
+	}
+	if !first {
+		buf.WriteString(`,`)
+	}
+
+	created, err := fastTimeMarshalJSON(mj.Created)
+	if err != nil {
+		return err
+	}
+
+	buf.WriteString(`"time":`)
+	buf.WriteString(created)
+	buf.WriteString(`}`)
+	return nil
+}
+
+func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) {
+	const hex = "0123456789abcdef"
+
+	buf.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
+				i++
+				continue
+			}
+			if start < i {
+				buf.Write(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				buf.WriteByte('\\')
+				buf.WriteByte(b)
+			case '\n':
+				buf.WriteByte('\\')
+				buf.WriteByte('n')
+			case '\r':
+				buf.WriteByte('\\')
+				buf.WriteByte('r')
+			default:
+
+				buf.WriteString(`\u00`)
+				buf.WriteByte(hex[b>>4])
+				buf.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRune(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				buf.Write(s[start:i])
+			}
+			buf.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				buf.Write(s[start:i])
+			}
+			buf.WriteString(`\u202`)
+			buf.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		buf.Write(s[start:])
+	}
+	buf.WriteByte('"')
+}
diff --git a/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go b/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go
new file mode 100644
index 0000000..4645cd4
--- /dev/null
+++ b/daemon/logger/jsonfilelog/jsonlog/jsonlogbytes_test.go
@@ -0,0 +1,40 @@
+package jsonlog
+
+import (
+	"bytes"
+	"encoding/json"
+	"regexp"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestJSONLogsMarshalJSONBuf(t *testing.T) {
+	logs := map[*JSONLogs]string{
+		{Log: []byte(`"A log line with \\"`)}:                  `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":`,
+		{Log: []byte("A log line")}:                            `^{\"log\":\"A log line\",\"time\":`,
+		{Log: []byte("A log line with \r")}:                    `^{\"log\":\"A log line with \\r\",\"time\":`,
+		{Log: []byte("A log line with & < >")}:                 `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":`,
+		{Log: []byte("A log line with utf8 : 🚀 ψ ω β")}:        `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":`,
+		{Stream: "stdout"}:                                     `^{\"stream\":\"stdout\",\"time\":`,
+		{Stream: "stdout", Log: []byte("A log line")}:          `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":`,
+		{Created: time.Date(2017, 9, 1, 1, 1, 1, 1, time.UTC)}: `^{\"time\":"2017-09-01T01:01:01.000000001Z"}$`,
+
+		{}: `^{\"time\":"0001-01-01T00:00:00Z"}$`,
+		// These ones are a little weird
+		{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":`,
+		{Log: []byte{0xaF}}:            `^{\"log\":\"\\ufffd\",\"time\":`,
+		{Log: []byte{0x7F}}:            `^{\"log\":\"\x7f\",\"time\":`,
+		// with raw attributes
+		{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":`,
+	}
+	for jsonLog, expression := range logs {
+		var buf bytes.Buffer
+		err := jsonLog.MarshalJSONBuf(&buf)
+		require.NoError(t, err)
+		assert.Regexp(t, regexp.MustCompile(expression), buf.String())
+		assert.NoError(t, json.Unmarshal(buf.Bytes(), &map[string]interface{}{}))
+	}
+}
diff --git a/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go b/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go
new file mode 100644
index 0000000..5fd8023
--- /dev/null
+++ b/daemon/logger/jsonfilelog/jsonlog/time_marshalling.go
@@ -0,0 +1,20 @@
+package jsonlog
+
+import (
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+const jsonFormat = `"` + time.RFC3339Nano + `"`
+
+// fastTimeMarshalJSON avoids one of the extra allocations that
+// time.MarshalJSON is making.
+func fastTimeMarshalJSON(t time.Time) (string, error) {
+	if y := t.Year(); y < 0 || y >= 10000 {
+		// RFC 3339 is clear that years are 4 digits exactly.
+		// See golang.org/issue/4556#c15 for more discussion.
+		return "", errors.New("time.MarshalJSON: year outside of range [0,9999]")
+	}
+	return t.Format(jsonFormat), nil
+}
diff --git a/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go b/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go
new file mode 100644
index 0000000..931a9d3
--- /dev/null
+++ b/daemon/logger/jsonfilelog/jsonlog/time_marshalling_test.go
@@ -0,0 +1,35 @@
+package jsonlog
+
+import (
+	"testing"
+	"time"
+
+	"github.com/docker/docker/internal/testutil"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestFastTimeMarshalJSONWithInvalidYear(t *testing.T) {
+	aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local)
+	_, err := fastTimeMarshalJSON(aTime)
+	testutil.ErrorContains(t, err, "year outside of range")
+
+	anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local)
+	_, err = fastTimeMarshalJSON(anotherTime)
+	testutil.ErrorContains(t, err, "year outside of range")
+}
+
+func TestFastTimeMarshalJSON(t *testing.T) {
+	aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC)
+	json, err := fastTimeMarshalJSON(aTime)
+	require.NoError(t, err)
+	assert.Equal(t, "\"2015-05-29T11:01:02.000000003Z\"", json)
+
+	location, err := time.LoadLocation("Europe/Paris")
+	require.NoError(t, err)
+
+	aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location)
+	json, err = fastTimeMarshalJSON(aTime)
+	require.NoError(t, err)
+	assert.Equal(t, "\"2015-05-29T11:01:02.000000003+02:00\"", json)
+}
diff --git a/daemon/logger/jsonfilelog/read.go b/daemon/logger/jsonfilelog/read.go
index 32425b5..f190e01 100644
--- a/daemon/logger/jsonfilelog/read.go
+++ b/daemon/logger/jsonfilelog/read.go
@@ -1,33 +1,45 @@
 package jsonfilelog
 
 import (
-	"bytes"
 	"encoding/json"
-	"fmt"
 	"io"
-	"os"
-	"time"
-
-	"github.com/fsnotify/fsnotify"
-	"golang.org/x/net/context"
 
 	"github.com/docker/docker/api/types/backend"
 	"github.com/docker/docker/daemon/logger"
-	"github.com/docker/docker/daemon/logger/jsonfilelog/multireader"
-	"github.com/docker/docker/pkg/filenotify"
-	"github.com/docker/docker/pkg/jsonlog"
-	"github.com/docker/docker/pkg/tailfile"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
+	"github.com/docker/docker/daemon/logger/jsonfilelog/jsonlog"
 )
 
 const maxJSONDecodeRetry = 20000
 
+// ReadLogs implements the logger's LogReader interface for the logs
+// created by this driver.
+func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
+	logWatcher := logger.NewLogWatcher()
+
+	go l.readLogs(logWatcher, config)
+	return logWatcher
+}
+
+func (l *JSONFileLogger) readLogs(watcher *logger.LogWatcher, config logger.ReadConfig) {
+	defer close(watcher.Msg)
+
+	l.mu.Lock()
+	l.readers[watcher] = struct{}{}
+	l.mu.Unlock()
+
+	l.writer.ReadLogs(config, watcher)
+
+	l.mu.Lock()
+	delete(l.readers, watcher)
+	l.mu.Unlock()
+}
+
 func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) {
 	l.Reset()
 	if err := dec.Decode(l); err != nil {
 		return nil, err
 	}
+
 	var attrs []backend.LogAttr
 	if len(l.Attrs) != 0 {
 		attrs = make([]backend.LogAttr, 0, len(l.Attrs))
@@ -44,306 +56,34 @@
 	return msg, nil
 }
 
-// ReadLogs implements the logger's LogReader interface for the logs
-// created by this driver.
-func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher {
-	logWatcher := logger.NewLogWatcher()
-
-	go l.readLogs(logWatcher, config)
-	return logWatcher
-}
-
-func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) {
-	defer close(logWatcher.Msg)
-
-	// lock so the read stream doesn't get corrupted due to rotations or other log data written while we open these files
-	// This will block writes!!!
-	l.mu.RLock()
-
-	// TODO it would be nice to move a lot of this reader implementation to the rotate logger object
-	pth := l.writer.LogPath()
-	var files []io.ReadSeeker
-	for i := l.writer.MaxFiles(); i > 1; i-- {
-		f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1))
-		if err != nil {
-			if !os.IsNotExist(err) {
-				logWatcher.Err <- err
-				l.mu.RUnlock()
-				return
-			}
-			continue
-		}
-		defer f.Close()
-		files = append(files, f)
-	}
-
-	latestFile, err := os.Open(pth)
-	if err != nil {
-		logWatcher.Err <- errors.Wrap(err, "error opening latest log file")
-		l.mu.RUnlock()
-		return
-	}
-	defer latestFile.Close()
-
-	latestChunk, err := newSectionReader(latestFile)
-
-	// Now we have the reader sectioned, all fd's opened, we can unlock.
-	// New writes/rotates will not affect seeking through these files
-	l.mu.RUnlock()
-
-	if err != nil {
-		logWatcher.Err <- err
-		return
-	}
-
-	if config.Tail != 0 {
-		tailer := multireader.MultiReadSeeker(append(files, latestChunk)...)
-		tailFile(tailer, logWatcher, config.Tail, config.Since)
-	}
-
-	// close all the rotated files
-	for _, f := range files {
-		if err := f.(io.Closer).Close(); err != nil {
-			logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err)
-		}
-	}
-
-	if !config.Follow || l.closed {
-		return
-	}
-
-	notifyRotate := l.writer.NotifyRotate()
-	defer l.writer.NotifyRotateEvict(notifyRotate)
-
-	l.mu.Lock()
-	l.readers[logWatcher] = struct{}{}
-	l.mu.Unlock()
-
-	followLogs(latestFile, logWatcher, notifyRotate, config.Since)
-
-	l.mu.Lock()
-	delete(l.readers, logWatcher)
-	l.mu.Unlock()
-}
-
-func newSectionReader(f *os.File) (*io.SectionReader, error) {
-	// seek to the end to get the size
-	// we'll leave this at the end of the file since section reader does not advance the reader
-	size, err := f.Seek(0, os.SEEK_END)
-	if err != nil {
-		return nil, errors.Wrap(err, "error getting current file size")
-	}
-	return io.NewSectionReader(f, 0, size), nil
-}
-
-func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) {
-	var rdr io.Reader
-	rdr = f
-	if tail > 0 {
-		ls, err := tailfile.TailFile(f, tail)
-		if err != nil {
-			logWatcher.Err <- err
-			return
-		}
-		rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
-	}
+// decodeFunc is used to create a decoder for the log file reader
+func decodeFunc(rdr io.Reader) func() (*logger.Message, error) {
+	l := &jsonlog.JSONLog{}
 	dec := json.NewDecoder(rdr)
-	l := &jsonlog.JSONLog{}
-	for {
-		msg, err := decodeLogLine(dec, l)
-		if err != nil {
-			if err != io.EOF {
-				logWatcher.Err <- err
-			}
-			return
-		}
-		if !since.IsZero() && msg.Timestamp.Before(since) {
-			continue
-		}
-		select {
-		case <-logWatcher.WatchClose():
-			return
-		case logWatcher.Msg <- msg:
-		}
-	}
-}
-
-func watchFile(name string) (filenotify.FileWatcher, error) {
-	fileWatcher, err := filenotify.New()
-	if err != nil {
-		return nil, err
-	}
-
-	if err := fileWatcher.Add(name); err != nil {
-		logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err)
-		fileWatcher.Close()
-		fileWatcher = filenotify.NewPollingWatcher()
-
-		if err := fileWatcher.Add(name); err != nil {
-			fileWatcher.Close()
-			logrus.Debugf("error watching log file for modifications: %v", err)
-			return nil, err
-		}
-	}
-	return fileWatcher, nil
-}
-
-func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) {
-	dec := json.NewDecoder(f)
-	l := &jsonlog.JSONLog{}
-
-	name := f.Name()
-	fileWatcher, err := watchFile(name)
-	if err != nil {
-		logWatcher.Err <- err
-		return
-	}
-	defer func() {
-		f.Close()
-		fileWatcher.Remove(name)
-		fileWatcher.Close()
-	}()
-
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	go func() {
-		select {
-		case <-logWatcher.WatchClose():
-			fileWatcher.Remove(name)
-			cancel()
-		case <-ctx.Done():
-			return
-		}
-	}()
-
-	var retries int
-	handleRotate := func() error {
-		f.Close()
-		fileWatcher.Remove(name)
-
-		// retry when the file doesn't exist
-		for retries := 0; retries <= 5; retries++ {
-			f, err = os.Open(name)
-			if err == nil || !os.IsNotExist(err) {
+	return func() (msg *logger.Message, err error) {
+		for retries := 0; retries < maxJSONDecodeRetry; retries++ {
+			msg, err = decodeLogLine(dec, l)
+			if err == nil {
 				break
 			}
-		}
-		if err != nil {
-			return err
-		}
-		if err := fileWatcher.Add(name); err != nil {
-			return err
-		}
-		dec = json.NewDecoder(f)
-		return nil
-	}
 
-	errRetry := errors.New("retry")
-	errDone := errors.New("done")
-	waitRead := func() error {
-		select {
-		case e := <-fileWatcher.Events():
-			switch e.Op {
-			case fsnotify.Write:
-				dec = json.NewDecoder(f)
-				return nil
-			case fsnotify.Rename, fsnotify.Remove:
-				select {
-				case <-notifyRotate:
-				case <-ctx.Done():
-					return errDone
-				}
-				if err := handleRotate(); err != nil {
-					return err
-				}
-				return nil
-			}
-			return errRetry
-		case err := <-fileWatcher.Errors():
-			logrus.Debug("logger got error watching file: %v", err)
-			// Something happened, let's try and stay alive and create a new watcher
-			if retries <= 5 {
-				fileWatcher.Close()
-				fileWatcher, err = watchFile(name)
-				if err != nil {
-					return err
-				}
+			// try again, could be due to an incomplete json object as we read
+			if _, ok := err.(*json.SyntaxError); ok {
+				dec = json.NewDecoder(rdr)
 				retries++
-				return errRetry
+				continue
 			}
-			return err
-		case <-ctx.Done():
-			return errDone
-		}
-	}
 
-	handleDecodeErr := func(err error) error {
-		if err == io.EOF {
-			for {
-				err := waitRead()
-				if err == nil {
-					break
-				}
-				if err == errRetry {
-					continue
-				}
-				return err
-			}
-			return nil
-		}
-		// try again because this shouldn't happen
-		if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry {
-			dec = json.NewDecoder(f)
-			retries++
-			return nil
-		}
-		// io.ErrUnexpectedEOF is returned from json.Decoder when there is
-		// remaining data in the parser's buffer while an io.EOF occurs.
-		// If the json logger writes a partial json log entry to the disk
-		// while at the same time the decoder tries to decode it, the race condition happens.
-		if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry {
-			reader := io.MultiReader(dec.Buffered(), f)
-			dec = json.NewDecoder(reader)
-			retries++
-			return nil
-		}
-		return err
-	}
-
-	// main loop
-	for {
-		msg, err := decodeLogLine(dec, l)
-		if err != nil {
-			if err := handleDecodeErr(err); err != nil {
-				if err == errDone {
-					return
-				}
-				// we got an unrecoverable error, so return
-				logWatcher.Err <- err
-				return
-			}
-			// ready to try again
-			continue
-		}
-
-		retries = 0 // reset retries since we've succeeded
-		if !since.IsZero() && msg.Timestamp.Before(since) {
-			continue
-		}
-		select {
-		case logWatcher.Msg <- msg:
-		case <-ctx.Done():
-			logWatcher.Msg <- msg
-			for {
-				msg, err := decodeLogLine(dec, l)
-				if err != nil {
-					return
-				}
-				if !since.IsZero() && msg.Timestamp.Before(since) {
-					continue
-				}
-				logWatcher.Msg <- msg
+			// io.ErrUnexpectedEOF is returned from json.Decoder when there is
+			// remaining data in the parser's buffer while an io.EOF occurs.
+			// If the json logger writes a partial json log entry to the disk
+			// while at the same time the decoder tries to decode it, the race condition happens.
+			if err == io.ErrUnexpectedEOF {
+				reader := io.MultiReader(dec.Buffered(), rdr)
+				dec = json.NewDecoder(reader)
+				retries++
 			}
 		}
+		return msg, err
 	}
 }
diff --git a/daemon/logger/jsonfilelog/read_test.go b/daemon/logger/jsonfilelog/read_test.go
new file mode 100644
index 0000000..599fdf9
--- /dev/null
+++ b/daemon/logger/jsonfilelog/read_test.go
@@ -0,0 +1,64 @@
+package jsonfilelog
+
+import (
+	"bytes"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/daemon/logger"
+	"github.com/gotestyourself/gotestyourself/fs"
+	"github.com/stretchr/testify/require"
+)
+
+func BenchmarkJSONFileLoggerReadLogs(b *testing.B) {
+	tmp := fs.NewDir(b, "bench-jsonfilelog")
+	defer tmp.Remove()
+
+	jsonlogger, err := New(logger.Info{
+		ContainerID: "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657",
+		LogPath:     tmp.Join("container.log"),
+		Config: map[string]string{
+			"labels": "first,second",
+		},
+		ContainerLabels: map[string]string{
+			"first":  "label_value",
+			"second": "label_foo",
+		},
+	})
+	require.NoError(b, err)
+	defer jsonlogger.Close()
+
+	msg := &logger.Message{
+		Line:      []byte("Line that thinks that it is log line from docker\n"),
+		Source:    "stderr",
+		Timestamp: time.Now().UTC(),
+	}
+
+	buf := bytes.NewBuffer(nil)
+	require.NoError(b, marshalMessage(msg, nil, buf))
+	b.SetBytes(int64(buf.Len()))
+
+	b.ResetTimer()
+
+	chError := make(chan error, b.N+1)
+	go func() {
+		for i := 0; i < b.N; i++ {
+			chError <- jsonlogger.Log(msg)
+		}
+		chError <- jsonlogger.Close()
+	}()
+
+	lw := jsonlogger.(*JSONFileLogger).ReadLogs(logger.ReadConfig{Follow: true})
+	watchClose := lw.WatchClose()
+	for {
+		select {
+		case <-lw.Msg:
+		case <-watchClose:
+			return
+		case err := <-chError:
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+	}
+}
diff --git a/daemon/logger/logger.go b/daemon/logger/logger.go
index a9d1e76..dc25beb 100644
--- a/daemon/logger/logger.go
+++ b/daemon/logger/logger.go
@@ -12,7 +12,6 @@
 	"time"
 
 	"github.com/docker/docker/api/types/backend"
-	"github.com/docker/docker/pkg/jsonlog"
 )
 
 // ErrReadLogsNotSupported is returned when the underlying log driver does not support reading
@@ -26,8 +25,6 @@
 func (ErrReadLogsNotSupported) NotImplemented() {}
 
 const (
-	// TimeFormat is the time format used for timestamps sent to log readers.
-	TimeFormat           = jsonlog.RFC3339NanoFixed
 	logWatcherBufferSize = 4096
 )
 
@@ -81,9 +78,17 @@
 	Close() error
 }
 
+// SizedLogger is the interface for logging drivers that can control
+// the size of buffer used for their messages.
+type SizedLogger interface {
+	Logger
+	BufSize() int
+}
+
 // ReadConfig is the configuration passed into ReadLogs.
 type ReadConfig struct {
 	Since  time.Time
+	Until  time.Time
 	Tail   int
 	Follow bool
 }
@@ -135,3 +140,6 @@
 	// Determines if a log driver can read back logs
 	ReadLogs bool
 }
+
+// MarshalFunc is a func that marshals a message into an arbitrary format
+type MarshalFunc func(*Message) ([]byte, error)
diff --git a/daemon/logger/loggerutils/logfile.go b/daemon/logger/loggerutils/logfile.go
new file mode 100644
index 0000000..6a7a689
--- /dev/null
+++ b/daemon/logger/loggerutils/logfile.go
@@ -0,0 +1,454 @@
+package loggerutils
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/docker/docker/daemon/logger"
+	"github.com/docker/docker/daemon/logger/loggerutils/multireader"
+	"github.com/docker/docker/pkg/filenotify"
+	"github.com/docker/docker/pkg/pubsub"
+	"github.com/docker/docker/pkg/tailfile"
+	"github.com/fsnotify/fsnotify"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// LogFile is Logger implementation for default Docker logging.
+type LogFile struct {
+	f             *os.File // store for closing
+	closed        bool
+	mu            sync.RWMutex
+	capacity      int64 //maximum size of each file
+	currentSize   int64 // current size of the latest file
+	maxFiles      int   //maximum number of files
+	notifyRotate  *pubsub.Publisher
+	marshal       logger.MarshalFunc
+	createDecoder makeDecoderFunc
+}
+
+type makeDecoderFunc func(rdr io.Reader) func() (*logger.Message, error)
+
+//NewLogFile creates new LogFile
+func NewLogFile(logPath string, capacity int64, maxFiles int, marshaller logger.MarshalFunc, decodeFunc makeDecoderFunc) (*LogFile, error) {
+	log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
+	if err != nil {
+		return nil, err
+	}
+
+	size, err := log.Seek(0, io.SeekEnd)
+	if err != nil {
+		return nil, err
+	}
+
+	return &LogFile{
+		f:             log,
+		capacity:      capacity,
+		currentSize:   size,
+		maxFiles:      maxFiles,
+		notifyRotate:  pubsub.NewPublisher(0, 1),
+		marshal:       marshaller,
+		createDecoder: decodeFunc,
+	}, nil
+}
+
+// WriteLogEntry writes the provided log message to the current log file.
+// This may trigger a rotation event if the max file/capacity limits are hit.
+func (w *LogFile) WriteLogEntry(msg *logger.Message) error {
+	b, err := w.marshal(msg)
+	if err != nil {
+		return errors.Wrap(err, "error marshalling log message")
+	}
+
+	logger.PutMessage(msg)
+
+	w.mu.Lock()
+	if w.closed {
+		w.mu.Unlock()
+		return errors.New("cannot write because the output file was closed")
+	}
+
+	if err := w.checkCapacityAndRotate(); err != nil {
+		w.mu.Unlock()
+		return err
+	}
+
+	n, err := w.f.Write(b)
+	if err == nil {
+		w.currentSize += int64(n)
+	}
+	w.mu.Unlock()
+	return err
+}
+
+func (w *LogFile) checkCapacityAndRotate() error {
+	if w.capacity == -1 {
+		return nil
+	}
+
+	if w.currentSize >= w.capacity {
+		name := w.f.Name()
+		if err := w.f.Close(); err != nil {
+			return errors.Wrap(err, "error closing file")
+		}
+		if err := rotate(name, w.maxFiles); err != nil {
+			return err
+		}
+		file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
+		if err != nil {
+			return err
+		}
+		w.f = file
+		w.currentSize = 0
+		w.notifyRotate.Publish(struct{}{})
+	}
+
+	return nil
+}
+
+func rotate(name string, maxFiles int) error {
+	if maxFiles < 2 {
+		return nil
+	}
+	for i := maxFiles - 1; i > 1; i-- {
+		toPath := name + "." + strconv.Itoa(i)
+		fromPath := name + "." + strconv.Itoa(i-1)
+		if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) {
+			return errors.Wrap(err, "error rotating old log entries")
+		}
+	}
+
+	if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
+		return errors.Wrap(err, "error rotating current log")
+	}
+	return nil
+}
+
+// LogPath returns the location the given writer logs to.
+func (w *LogFile) LogPath() string {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	return w.f.Name()
+}
+
+// MaxFiles return maximum number of files
+func (w *LogFile) MaxFiles() int {
+	return w.maxFiles
+}
+
+// Close closes underlying file and signals all readers to stop.
+func (w *LogFile) Close() error {
+	w.mu.Lock()
+	defer w.mu.Unlock()
+	if w.closed {
+		return nil
+	}
+	if err := w.f.Close(); err != nil {
+		return err
+	}
+	w.closed = true
+	return nil
+}
+
+// ReadLogs decodes entries from log files and sends them the passed in watcher
+func (w *LogFile) ReadLogs(config logger.ReadConfig, watcher *logger.LogWatcher) {
+	w.mu.RLock()
+	files, err := w.openRotatedFiles()
+	if err != nil {
+		w.mu.RUnlock()
+		watcher.Err <- err
+		return
+	}
+	defer func() {
+		for _, f := range files {
+			f.Close()
+		}
+	}()
+
+	currentFile, err := os.Open(w.f.Name())
+	if err != nil {
+		w.mu.RUnlock()
+		watcher.Err <- err
+		return
+	}
+	defer currentFile.Close()
+
+	currentChunk, err := newSectionReader(currentFile)
+	w.mu.RUnlock()
+
+	if err != nil {
+		watcher.Err <- err
+		return
+	}
+
+	if config.Tail != 0 {
+		seekers := make([]io.ReadSeeker, 0, len(files)+1)
+		for _, f := range files {
+			seekers = append(seekers, f)
+		}
+		seekers = append(seekers, currentChunk)
+		tailFile(multireader.MultiReadSeeker(seekers...), watcher, w.createDecoder, config)
+	}
+
+	w.mu.RLock()
+	if !config.Follow || w.closed {
+		w.mu.RUnlock()
+		return
+	}
+	w.mu.RUnlock()
+
+	notifyRotate := w.notifyRotate.Subscribe()
+	defer w.notifyRotate.Evict(notifyRotate)
+	followLogs(currentFile, watcher, notifyRotate, w.createDecoder, config.Since, config.Until)
+}
+
+func (w *LogFile) openRotatedFiles() (files []*os.File, err error) {
+	defer func() {
+		if err == nil {
+			return
+		}
+		for _, f := range files {
+			f.Close()
+		}
+	}()
+
+	for i := w.maxFiles; i > 1; i-- {
+		f, err := os.Open(fmt.Sprintf("%s.%d", w.f.Name(), i-1))
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return nil, err
+			}
+			continue
+		}
+		files = append(files, f)
+	}
+
+	return files, nil
+}
+
+func newSectionReader(f *os.File) (*io.SectionReader, error) {
+	// seek to the end to get the size
+	// we'll leave this at the end of the file since section reader does not advance the reader
+	size, err := f.Seek(0, io.SeekEnd)
+	if err != nil {
+		return nil, errors.Wrap(err, "error getting current file size")
+	}
+	return io.NewSectionReader(f, 0, size), nil
+}
+
+type decodeFunc func() (*logger.Message, error)
+
+func tailFile(f io.ReadSeeker, watcher *logger.LogWatcher, createDecoder makeDecoderFunc, config logger.ReadConfig) {
+	var rdr io.Reader = f
+	if config.Tail > 0 {
+		ls, err := tailfile.TailFile(f, config.Tail)
+		if err != nil {
+			watcher.Err <- err
+			return
+		}
+		rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n")))
+	}
+
+	decodeLogLine := createDecoder(rdr)
+	for {
+		msg, err := decodeLogLine()
+		if err != nil {
+			if err != io.EOF {
+				watcher.Err <- err
+			}
+			return
+		}
+		if !config.Since.IsZero() && msg.Timestamp.Before(config.Since) {
+			continue
+		}
+		if !config.Until.IsZero() && msg.Timestamp.After(config.Until) {
+			return
+		}
+		select {
+		case <-watcher.WatchClose():
+			return
+		case watcher.Msg <- msg:
+		}
+	}
+}
+
+func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, createDecoder makeDecoderFunc, since, until time.Time) {
+	decodeLogLine := createDecoder(f)
+
+	name := f.Name()
+	fileWatcher, err := watchFile(name)
+	if err != nil {
+		logWatcher.Err <- err
+		return
+	}
+	defer func() {
+		f.Close()
+		fileWatcher.Remove(name)
+		fileWatcher.Close()
+	}()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	go func() {
+		select {
+		case <-logWatcher.WatchClose():
+			fileWatcher.Remove(name)
+			cancel()
+		case <-ctx.Done():
+			return
+		}
+	}()
+
+	var retries int
+	handleRotate := func() error {
+		f.Close()
+		fileWatcher.Remove(name)
+
+		// retry when the file doesn't exist
+		for retries := 0; retries <= 5; retries++ {
+			f, err = os.Open(name)
+			if err == nil || !os.IsNotExist(err) {
+				break
+			}
+		}
+		if err != nil {
+			return err
+		}
+		if err := fileWatcher.Add(name); err != nil {
+			return err
+		}
+		decodeLogLine = createDecoder(f)
+		return nil
+	}
+
+	errRetry := errors.New("retry")
+	errDone := errors.New("done")
+	waitRead := func() error {
+		select {
+		case e := <-fileWatcher.Events():
+			switch e.Op {
+			case fsnotify.Write:
+				decodeLogLine = createDecoder(f)
+				return nil
+			case fsnotify.Rename, fsnotify.Remove:
+				select {
+				case <-notifyRotate:
+				case <-ctx.Done():
+					return errDone
+				}
+				if err := handleRotate(); err != nil {
+					return err
+				}
+				return nil
+			}
+			return errRetry
+		case err := <-fileWatcher.Errors():
+			logrus.Debugf("logger got error watching file: %v", err)
+			// Something happened, let's try and stay alive and create a new watcher
+			if retries <= 5 {
+				fileWatcher.Close()
+				fileWatcher, err = watchFile(name)
+				if err != nil {
+					return err
+				}
+				retries++
+				return errRetry
+			}
+			return err
+		case <-ctx.Done():
+			return errDone
+		}
+	}
+
+	handleDecodeErr := func(err error) error {
+		if err != io.EOF {
+			return err
+		}
+
+		for {
+			err := waitRead()
+			if err == nil {
+				break
+			}
+			if err == errRetry {
+				continue
+			}
+			return err
+		}
+		return nil
+	}
+
+	// main loop
+	for {
+		msg, err := decodeLogLine()
+		if err != nil {
+			if err := handleDecodeErr(err); err != nil {
+				if err == errDone {
+					return
+				}
+				// we got an unrecoverable error, so return
+				logWatcher.Err <- err
+				return
+			}
+			// ready to try again
+			continue
+		}
+
+		retries = 0 // reset retries since we've succeeded
+		if !since.IsZero() && msg.Timestamp.Before(since) {
+			continue
+		}
+		if !until.IsZero() && msg.Timestamp.After(until) {
+			return
+		}
+		select {
+		case logWatcher.Msg <- msg:
+		case <-ctx.Done():
+			logWatcher.Msg <- msg
+			for {
+				msg, err := decodeLogLine()
+				if err != nil {
+					return
+				}
+				if !since.IsZero() && msg.Timestamp.Before(since) {
+					continue
+				}
+				if !until.IsZero() && msg.Timestamp.After(until) {
+					return
+				}
+				logWatcher.Msg <- msg
+			}
+		}
+	}
+}
+
+func watchFile(name string) (filenotify.FileWatcher, error) {
+	fileWatcher, err := filenotify.New()
+	if err != nil {
+		return nil, err
+	}
+
+	logger := logrus.WithFields(logrus.Fields{
+		"module": "logger",
+		"file":   name,
+	})
+
+	if err := fileWatcher.Add(name); err != nil {
+		logger.WithError(err).Warn("falling back to file poller")
+		fileWatcher.Close()
+		fileWatcher = filenotify.NewPollingWatcher()
+
+		if err := fileWatcher.Add(name); err != nil {
+			fileWatcher.Close()
+			logger.WithError(err).Debug("error watching log file for modifications")
+			return nil, err
+		}
+	}
+	return fileWatcher, nil
+}
diff --git a/daemon/logger/jsonfilelog/multireader/multireader.go b/daemon/logger/loggerutils/multireader/multireader.go
similarity index 100%
rename from daemon/logger/jsonfilelog/multireader/multireader.go
rename to daemon/logger/loggerutils/multireader/multireader.go
diff --git a/daemon/logger/jsonfilelog/multireader/multireader_test.go b/daemon/logger/loggerutils/multireader/multireader_test.go
similarity index 100%
rename from daemon/logger/jsonfilelog/multireader/multireader_test.go
rename to daemon/logger/loggerutils/multireader/multireader_test.go
diff --git a/daemon/logger/loggerutils/rotatefilewriter.go b/daemon/logger/loggerutils/rotatefilewriter.go
deleted file mode 100644
index 457a39b..0000000
--- a/daemon/logger/loggerutils/rotatefilewriter.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package loggerutils
-
-import (
-	"errors"
-	"os"
-	"strconv"
-	"sync"
-
-	"github.com/docker/docker/pkg/pubsub"
-)
-
-// RotateFileWriter is Logger implementation for default Docker logging.
-type RotateFileWriter struct {
-	f            *os.File // store for closing
-	closed       bool
-	mu           sync.Mutex
-	capacity     int64 //maximum size of each file
-	currentSize  int64 // current size of the latest file
-	maxFiles     int   //maximum number of files
-	notifyRotate *pubsub.Publisher
-}
-
-//NewRotateFileWriter creates new RotateFileWriter
-func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) {
-	log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640)
-	if err != nil {
-		return nil, err
-	}
-
-	size, err := log.Seek(0, os.SEEK_END)
-	if err != nil {
-		return nil, err
-	}
-
-	return &RotateFileWriter{
-		f:            log,
-		capacity:     capacity,
-		currentSize:  size,
-		maxFiles:     maxFiles,
-		notifyRotate: pubsub.NewPublisher(0, 1),
-	}, nil
-}
-
-//WriteLog write log message to File
-func (w *RotateFileWriter) Write(message []byte) (int, error) {
-	w.mu.Lock()
-	if w.closed {
-		w.mu.Unlock()
-		return -1, errors.New("cannot write because the output file was closed")
-	}
-	if err := w.checkCapacityAndRotate(); err != nil {
-		w.mu.Unlock()
-		return -1, err
-	}
-
-	n, err := w.f.Write(message)
-	if err == nil {
-		w.currentSize += int64(n)
-	}
-	w.mu.Unlock()
-	return n, err
-}
-
-func (w *RotateFileWriter) checkCapacityAndRotate() error {
-	if w.capacity == -1 {
-		return nil
-	}
-
-	if w.currentSize >= w.capacity {
-		name := w.f.Name()
-		if err := w.f.Close(); err != nil {
-			return err
-		}
-		if err := rotate(name, w.maxFiles); err != nil {
-			return err
-		}
-		file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640)
-		if err != nil {
-			return err
-		}
-		w.f = file
-		w.currentSize = 0
-		w.notifyRotate.Publish(struct{}{})
-	}
-
-	return nil
-}
-
-func rotate(name string, maxFiles int) error {
-	if maxFiles < 2 {
-		return nil
-	}
-	for i := maxFiles - 1; i > 1; i-- {
-		toPath := name + "." + strconv.Itoa(i)
-		fromPath := name + "." + strconv.Itoa(i-1)
-		if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) {
-			return err
-		}
-	}
-
-	if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	return nil
-}
-
-// LogPath returns the location the given writer logs to.
-func (w *RotateFileWriter) LogPath() string {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	return w.f.Name()
-}
-
-// MaxFiles return maximum number of files
-func (w *RotateFileWriter) MaxFiles() int {
-	return w.maxFiles
-}
-
-//NotifyRotate returns the new subscriber
-func (w *RotateFileWriter) NotifyRotate() chan interface{} {
-	return w.notifyRotate.Subscribe()
-}
-
-//NotifyRotateEvict removes the specified subscriber from receiving any more messages.
-func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) {
-	w.notifyRotate.Evict(sub)
-}
-
-// Close closes underlying file and signals all readers to stop.
-func (w *RotateFileWriter) Close() error {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-	if w.closed {
-		return nil
-	}
-	if err := w.f.Close(); err != nil {
-		return err
-	}
-	w.closed = true
-	return nil
-}
diff --git a/daemon/logger/plugin_unix.go b/daemon/logger/plugin_unix.go
index f254c9c..edf11af 100644
--- a/daemon/logger/plugin_unix.go
+++ b/daemon/logger/plugin_unix.go
@@ -1,4 +1,4 @@
-// +build linux solaris freebsd
+// +build linux freebsd
 
 package logger
 
@@ -6,8 +6,8 @@
 	"context"
 	"io"
 
+	"github.com/containerd/fifo"
 	"github.com/pkg/errors"
-	"github.com/tonistiigi/fifo"
 	"golang.org/x/sys/unix"
 )
 
diff --git a/daemon/logger/plugin_unsupported.go b/daemon/logger/plugin_unsupported.go
index 0a2036c..b649b06 100644
--- a/daemon/logger/plugin_unsupported.go
+++ b/daemon/logger/plugin_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!solaris,!freebsd
+// +build !linux,!freebsd
 
 package logger
 
diff --git a/daemon/logger/ring.go b/daemon/logger/ring.go
index dea8f6b..c89897c 100644
--- a/daemon/logger/ring.go
+++ b/daemon/logger/ring.go
@@ -202,7 +202,6 @@
 	r.closed = true
 	r.wait.Broadcast()
 	r.mu.Unlock()
-	return
 }
 
 // Drain drains all messages from the queue.
diff --git a/daemon/logger/splunk/splunk.go b/daemon/logger/splunk/splunk.go
index 274904b..31a0487 100644
--- a/daemon/logger/splunk/splunk.go
+++ b/daemon/logger/splunk/splunk.go
@@ -15,6 +15,7 @@
 	"net/url"
 	"os"
 	"strconv"
+	"strings"
 	"sync"
 	"time"
 
@@ -363,6 +364,11 @@
 }
 
 func (l *splunkLoggerRaw) Log(msg *logger.Message) error {
+	// empty or whitespace-only messages are not accepted by HEC
+	if strings.TrimSpace(string(msg.Line)) == "" {
+		return nil
+	}
+
 	message := l.createSplunkMessage(msg)
 
 	message.Event = string(append(l.prefix, msg.Line...))
diff --git a/daemon/logger/splunk/splunk_test.go b/daemon/logger/splunk/splunk_test.go
index cbe9a55..ebf835c 100644
--- a/daemon/logger/splunk/splunk_test.go
+++ b/daemon/logger/splunk/splunk_test.go
@@ -8,6 +8,7 @@
 	"time"
 
 	"github.com/docker/docker/daemon/logger"
+	"github.com/stretchr/testify/require"
 )
 
 // Validate options
@@ -125,7 +126,7 @@
 		splunkLoggerDriver.nullMessage.Source != "" ||
 		splunkLoggerDriver.nullMessage.SourceType != "" ||
 		splunkLoggerDriver.nullMessage.Index != "" ||
-		splunkLoggerDriver.gzipCompression != false ||
+		splunkLoggerDriver.gzipCompression ||
 		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
 		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
 		splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
@@ -255,7 +256,7 @@
 		splunkLoggerDriver.nullMessage.Source != "mysource" ||
 		splunkLoggerDriver.nullMessage.SourceType != "mysourcetype" ||
 		splunkLoggerDriver.nullMessage.Index != "myindex" ||
-		splunkLoggerDriver.gzipCompression != true ||
+		!splunkLoggerDriver.gzipCompression ||
 		splunkLoggerDriver.gzipCompressionLevel != gzip.DefaultCompression ||
 		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
 		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
@@ -355,7 +356,7 @@
 		splunkLoggerDriver.nullMessage.Source != "" ||
 		splunkLoggerDriver.nullMessage.SourceType != "" ||
 		splunkLoggerDriver.nullMessage.Index != "" ||
-		splunkLoggerDriver.gzipCompression != true ||
+		!splunkLoggerDriver.gzipCompression ||
 		splunkLoggerDriver.gzipCompressionLevel != gzip.BestSpeed ||
 		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
 		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
@@ -448,14 +449,10 @@
 	}
 
 	hostname, err := info.Hostname()
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	loggerDriver, err := New(info)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	if !hec.connectionVerified {
 		t.Fatal("By default connection should be verified")
@@ -472,7 +469,7 @@
 		splunkLoggerDriver.nullMessage.Source != "" ||
 		splunkLoggerDriver.nullMessage.SourceType != "" ||
 		splunkLoggerDriver.nullMessage.Index != "" ||
-		splunkLoggerDriver.gzipCompression != false ||
+		splunkLoggerDriver.gzipCompression ||
 		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
 		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
 		splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
@@ -586,7 +583,7 @@
 		splunkLoggerDriver.nullMessage.Source != "" ||
 		splunkLoggerDriver.nullMessage.SourceType != "" ||
 		splunkLoggerDriver.nullMessage.Index != "" ||
-		splunkLoggerDriver.gzipCompression != false ||
+		splunkLoggerDriver.gzipCompression ||
 		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
 		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
 		splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
@@ -698,7 +695,7 @@
 		splunkLoggerDriver.nullMessage.Source != "" ||
 		splunkLoggerDriver.nullMessage.SourceType != "" ||
 		splunkLoggerDriver.nullMessage.Index != "" ||
-		splunkLoggerDriver.gzipCompression != false ||
+		splunkLoggerDriver.gzipCompression ||
 		splunkLoggerDriver.postMessagesFrequency != defaultPostMessagesFrequency ||
 		splunkLoggerDriver.postMessagesBatchSize != defaultPostMessagesBatchSize ||
 		splunkLoggerDriver.bufferMaximum != defaultBufferMaximum ||
@@ -716,12 +713,19 @@
 	if err := loggerDriver.Log(&logger.Message{Line: []byte("notjson"), Source: "stdout", Timestamp: message2Time}); err != nil {
 		t.Fatal(err)
 	}
+	message3Time := time.Now()
+	if err := loggerDriver.Log(&logger.Message{Line: []byte(" "), Source: "stdout", Timestamp: message3Time}); err != nil {
+		t.Fatal(err)
+	}
 
 	err = loggerDriver.Close()
 	if err != nil {
 		t.Fatal(err)
 	}
 
+	// message3 would have an empty or whitespace only string in the "event" field
+	// both of which are not acceptable to HEC
+	// thus here we must expect 2 messages, not 3
 	if len(hec.messages) != 2 {
 		t.Fatal("Expected two messages")
 	}
diff --git a/daemon/logs.go b/daemon/logs.go
index 68c5e5a..131360b 100644
--- a/daemon/logs.go
+++ b/daemon/logs.go
@@ -77,8 +77,18 @@
 		since = time.Unix(s, n)
 	}
 
+	var until time.Time
+	if config.Until != "" && config.Until != "0" {
+		s, n, err := timetypes.ParseTimestamps(config.Until, 0)
+		if err != nil {
+			return nil, false, err
+		}
+		until = time.Unix(s, n)
+	}
+
 	readConfig := logger.ReadConfig{
 		Since:  since,
+		Until:  until,
 		Tail:   tailLines,
 		Follow: follow,
 	}
@@ -113,7 +123,7 @@
 				}
 				return
 			case <-ctx.Done():
-				lg.Debug("logs: end stream, ctx is done: %v", ctx.Err())
+				lg.Debugf("logs: end stream, ctx is done: %v", ctx.Err())
 				return
 			case msg, ok := <-logs.Msg:
 				// there is some kind of pool or ring buffer in the logger that
diff --git a/daemon/metrics.go b/daemon/metrics.go
index 8cd363f..44228e7 100644
--- a/daemon/metrics.go
+++ b/daemon/metrics.go
@@ -169,5 +169,4 @@
 			logrus.WithError(err).WithField("name", p.Name()).WithField("socket", sockPath).Error("error unmounting metrics socket for plugin")
 		}
 	}
-	return
 }
diff --git a/daemon/monitor.go b/daemon/monitor.go
index 3946e7a..c0a265d 100644
--- a/daemon/monitor.go
+++ b/daemon/monitor.go
@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"runtime"
@@ -25,15 +26,15 @@
 	}
 }
 
-// StateChanged updates daemon state changes from containerd
-func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error {
-	c := daemon.containers.Get(id)
-	if c == nil {
+// ProcessEvent is called by libcontainerd whenever an event occurs
+func (daemon *Daemon) ProcessEvent(id string, e libcontainerd.EventType, ei libcontainerd.EventInfo) error {
+	c, err := daemon.GetContainer(id)
+	if c == nil || err != nil {
 		return fmt.Errorf("no such container: %s", id)
 	}
 
-	switch e.State {
-	case libcontainerd.StateOOM:
+	switch e {
+	case libcontainerd.EventOOM:
 		// StateOOM is Linux specific and should never be hit on Windows
 		if runtime.GOOS == "windows" {
 			return errors.New("received StateOOM from libcontainerd on Windows. This should never happen")
@@ -43,63 +44,72 @@
 			return err
 		}
 		daemon.LogContainerEvent(c, "oom")
-	case libcontainerd.StateExit:
+	case libcontainerd.EventExit:
+		if int(ei.Pid) == c.Pid {
+			_, _, err := daemon.containerd.DeleteTask(context.Background(), c.ID)
+			if err != nil {
+				logrus.WithError(err).Warnf("failed to delete container %s from containerd", c.ID)
+			}
 
-		c.Lock()
-		c.StreamConfig.Wait()
-		c.Reset(false)
+			c.Lock()
+			c.StreamConfig.Wait()
+			c.Reset(false)
 
-		// If daemon is being shutdown, don't let the container restart
-		restart, wait, err := c.RestartManager().ShouldRestart(e.ExitCode, daemon.IsShuttingDown() || c.HasBeenManuallyStopped, time.Since(c.StartedAt))
-		if err == nil && restart {
-			c.RestartCount++
-			c.SetRestarting(platformConstructExitStatus(e))
-		} else {
-			c.SetStopped(platformConstructExitStatus(e))
-			defer daemon.autoRemove(c)
-		}
+			exitStatus := container.ExitStatus{
+				ExitCode:  int(ei.ExitCode),
+				ExitedAt:  ei.ExitedAt,
+				OOMKilled: ei.OOMKilled,
+			}
+			restart, wait, err := c.RestartManager().ShouldRestart(ei.ExitCode, daemon.IsShuttingDown() || c.HasBeenManuallyStopped, time.Since(c.StartedAt))
+			if err == nil && restart {
+				c.RestartCount++
+				c.SetRestarting(&exitStatus)
+			} else {
+				c.SetStopped(&exitStatus)
+				defer daemon.autoRemove(c)
+			}
 
-		// cancel healthcheck here, they will be automatically
-		// restarted if/when the container is started again
-		daemon.stopHealthchecks(c)
-		attributes := map[string]string{
-			"exitCode": strconv.Itoa(int(e.ExitCode)),
-		}
-		daemon.LogContainerEventWithAttributes(c, "die", attributes)
-		daemon.Cleanup(c)
+			// cancel healthcheck here, they will be automatically
+			// restarted if/when the container is started again
+			daemon.stopHealthchecks(c)
+			attributes := map[string]string{
+				"exitCode": strconv.Itoa(int(ei.ExitCode)),
+			}
+			daemon.LogContainerEventWithAttributes(c, "die", attributes)
+			daemon.Cleanup(c)
 
-		if err == nil && restart {
-			go func() {
-				err := <-wait
-				if err == nil {
-					// daemon.netController is initialized when daemon is restoring containers.
-					// But containerStart will use daemon.netController segment.
-					// So to avoid panic at startup process, here must wait util daemon restore done.
-					daemon.waitForStartupDone()
-					if err = daemon.containerStart(c, "", "", false); err != nil {
-						logrus.Debugf("failed to restart container: %+v", err)
+			if err == nil && restart {
+				go func() {
+					err := <-wait
+					if err == nil {
+						// daemon.netController is initialized while the daemon is restoring containers,
+						// but containerStart uses the daemon.netController segment.
+						// To avoid a panic during startup, we must wait here until the daemon restore is done.
+						daemon.waitForStartupDone()
+						if err = daemon.containerStart(c, "", "", false); err != nil {
+							logrus.Debugf("failed to restart container: %+v", err)
+						}
 					}
-				}
-				if err != nil {
-					c.SetStopped(platformConstructExitStatus(e))
-					defer daemon.autoRemove(c)
-					if err != restartmanager.ErrRestartCanceled {
-						logrus.Errorf("restartmanger wait error: %+v", err)
+					if err != nil {
+						c.SetStopped(&exitStatus)
+						defer daemon.autoRemove(c)
+						if err != restartmanager.ErrRestartCanceled {
+							logrus.Errorf("restartmanger wait error: %+v", err)
+						}
 					}
-				}
-			}()
+				}()
+			}
+
+			daemon.setStateCounter(c)
+			defer c.Unlock()
+			if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+				return err
+			}
+			return daemon.postRunProcessing(c, ei)
 		}
 
-		daemon.setStateCounter(c)
-
-		defer c.Unlock()
-		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
-			return err
-		}
-		return daemon.postRunProcessing(c, e)
-	case libcontainerd.StateExitProcess:
-		if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil {
-			ec := int(e.ExitCode)
+		if execConfig := c.ExecCommands.ByPid(int(ei.Pid)); execConfig != nil {
+			ec := int(ei.ExitCode)
 			execConfig.Lock()
 			defer execConfig.Unlock()
 			execConfig.ExitCode = &ec
@@ -111,42 +121,59 @@
 
 			// remove the exec command from the container's store only and not the
 			// daemon's store so that the exec command can be inspected.
-			c.ExecCommands.Delete(execConfig.ID)
+			c.ExecCommands.Delete(execConfig.ID, execConfig.Pid)
 		} else {
-			logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e)
+			logrus.WithFields(logrus.Fields{
+				"container": c.ID,
+				"exec-pid":  ei.Pid,
+			}).Warnf("Ignoring Exit Event, no such exec command found")
 		}
-	case libcontainerd.StateStart, libcontainerd.StateRestore:
-		// Container is already locked in this case
-		c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart)
-		c.HasBeenManuallyStopped = false
-		c.HasBeenStartedBefore = true
-		daemon.setStateCounter(c)
+	case libcontainerd.EventStart:
+		c.Lock()
+		defer c.Unlock()
 
-		daemon.initHealthMonitor(c)
-		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
-			c.Reset(false)
-			return err
+		// This is here to handle start not generated by docker
+		if !c.Running {
+			c.SetRunning(int(ei.Pid), false)
+			c.HasBeenManuallyStopped = false
+			c.HasBeenStartedBefore = true
+			daemon.setStateCounter(c)
+
+			daemon.initHealthMonitor(c)
+
+			if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+				return err
+			}
+			daemon.LogContainerEvent(c, "start")
 		}
 
-		daemon.LogContainerEvent(c, "start")
-	case libcontainerd.StatePause:
-		// Container is already locked in this case
-		c.Paused = true
-		daemon.setStateCounter(c)
-		daemon.updateHealthMonitor(c)
-		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
-			return err
+	case libcontainerd.EventPaused:
+		c.Lock()
+		defer c.Unlock()
+
+		if !c.Paused {
+			c.Paused = true
+			daemon.setStateCounter(c)
+			daemon.updateHealthMonitor(c)
+			if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+				return err
+			}
+			daemon.LogContainerEvent(c, "pause")
 		}
-		daemon.LogContainerEvent(c, "pause")
-	case libcontainerd.StateResume:
-		// Container is already locked in this case
-		c.Paused = false
-		daemon.setStateCounter(c)
-		daemon.updateHealthMonitor(c)
-		if err := c.CheckpointTo(daemon.containersReplica); err != nil {
-			return err
+	case libcontainerd.EventResumed:
+		c.Lock()
+		defer c.Unlock()
+
+		if c.Paused {
+			c.Paused = false
+			daemon.setStateCounter(c)
+			daemon.updateHealthMonitor(c)
+
+			if err := c.CheckpointTo(daemon.containersReplica); err != nil {
+				return err
+			}
+			daemon.LogContainerEvent(c, "unpause")
 		}
-		daemon.LogContainerEvent(c, "unpause")
 	}
 	return nil
 }
diff --git a/daemon/monitor_linux.go b/daemon/monitor_linux.go
index 09f5af5..0995758 100644
--- a/daemon/monitor_linux.go
+++ b/daemon/monitor_linux.go
@@ -5,15 +5,7 @@
 	"github.com/docker/docker/libcontainerd"
 )
 
-// platformConstructExitStatus returns a platform specific exit status structure
-func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
-	return &container.ExitStatus{
-		ExitCode:  int(e.ExitCode),
-		OOMKilled: e.OOMKilled,
-	}
-}
-
 // postRunProcessing perfoms any processing needed on the container after it has stopped.
-func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
+func (daemon *Daemon) postRunProcessing(_ *container.Container, _ libcontainerd.EventInfo) error {
 	return nil
 }
diff --git a/daemon/monitor_solaris.go b/daemon/monitor_solaris.go
index 5ccfada..0995758 100644
--- a/daemon/monitor_solaris.go
+++ b/daemon/monitor_solaris.go
@@ -5,14 +5,7 @@
 	"github.com/docker/docker/libcontainerd"
 )
 
-// platformConstructExitStatus returns a platform specific exit status structure
-func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
-	return &container.ExitStatus{
-		ExitCode: int(e.ExitCode),
-	}
-}
-
 // postRunProcessing perfoms any processing needed on the container after it has stopped.
-func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
+func (daemon *Daemon) postRunProcessing(_ *container.Container, _ libcontainerd.EventInfo) error {
 	return nil
 }
diff --git a/daemon/monitor_windows.go b/daemon/monitor_windows.go
index 15d656d..dd5a09a 100644
--- a/daemon/monitor_windows.go
+++ b/daemon/monitor_windows.go
@@ -1,40 +1,52 @@
 package daemon
 
 import (
-	"fmt"
+	"context"
 
 	"github.com/docker/docker/container"
 	"github.com/docker/docker/libcontainerd"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
-// platformConstructExitStatus returns a platform specific exit status structure
-func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus {
-	return &container.ExitStatus{
-		ExitCode: int(e.ExitCode),
-	}
-}
-
-// postRunProcessing perfoms any processing needed on the container after it has stopped.
-func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error {
-	if e.ExitCode == 0 && e.UpdatePending {
-		spec, err := daemon.createSpec(container)
+// postRunProcessing starts a servicing container if required
+func (daemon *Daemon) postRunProcessing(c *container.Container, ei libcontainerd.EventInfo) error {
+	if ei.ExitCode == 0 && ei.UpdatePending {
+		spec, err := daemon.createSpec(c)
 		if err != nil {
 			return err
 		}
-
 		// Turn on servicing
 		spec.Windows.Servicing = true
 
-		copts, err := daemon.getLibcontainerdCreateOptions(container)
+		copts, err := daemon.getLibcontainerdCreateOptions(c)
 		if err != nil {
 			return err
 		}
 
-		// Create a new servicing container, which will start, complete the update, and merge back the
-		// results if it succeeded, all as part of the below function call.
-		if err := daemon.containerd.Create((container.ID + "_servicing"), "", "", *spec, container.InitializeStdio, copts...); err != nil {
-			container.SetExitCode(-1)
-			return fmt.Errorf("Post-run update servicing failed: %s", err)
+		// Create a new servicing container, which will start, complete the
+		// update, and merge back the results if it succeeded, all as part of
+		// the below function call.
+		ctx := context.Background()
+		svcID := c.ID + "_servicing"
+		logger := logrus.WithField("container", svcID)
+		if err := daemon.containerd.Create(ctx, svcID, spec, copts); err != nil {
+			c.SetExitCode(-1)
+			return errors.Wrap(err, "post-run update servicing failed")
+		}
+		_, err = daemon.containerd.Start(ctx, svcID, "", false, nil)
+		if err != nil {
+			logger.WithError(err).Warn("failed to run servicing container")
+			if err := daemon.containerd.Delete(ctx, svcID); err != nil {
+				logger.WithError(err).Warn("failed to delete servicing container")
+			}
+		} else {
+			if _, _, err := daemon.containerd.DeleteTask(ctx, svcID); err != nil {
+				logger.WithError(err).Warn("failed to delete servicing container task")
+			}
+			if err := daemon.containerd.Delete(ctx, svcID); err != nil {
+				logger.WithError(err).Warn("failed to delete servicing container")
+			}
 		}
 	}
 	return nil
diff --git a/daemon/names.go b/daemon/names.go
index e61e940..3ef96b1 100644
--- a/daemon/names.go
+++ b/daemon/names.go
@@ -4,8 +4,8 @@
 	"fmt"
 	"strings"
 
-	"github.com/docker/docker/api"
 	"github.com/docker/docker/container"
+	"github.com/docker/docker/daemon/names"
 	"github.com/docker/docker/pkg/namesgenerator"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/pkg/errors"
@@ -13,8 +13,8 @@
 )
 
 var (
-	validContainerNameChars   = api.RestrictedNameChars
-	validContainerNamePattern = api.RestrictedNamePattern
+	validContainerNameChars   = names.RestrictedNameChars
+	validContainerNamePattern = names.RestrictedNamePattern
 )
 
 func (daemon *Daemon) registerName(container *container.Container) error {
@@ -69,7 +69,7 @@
 				logrus.Errorf("got unexpected error while looking up reserved name: %v", err)
 				return "", err
 			}
-			return "", validationError{errors.Errorf("Conflict. The container name %q is already in use by container %q. You have to remove (or rename) that container to be able to reuse that name.", name, id)}
+			return "", nameConflictError{id: id, name: name}
 		}
 		return "", errors.Wrapf(err, "error reserving name: %q", name)
 	}
diff --git a/daemon/names/names.go b/daemon/names/names.go
new file mode 100644
index 0000000..26f6748
--- /dev/null
+++ b/daemon/names/names.go
@@ -0,0 +1,9 @@
+package names
+
+import "regexp"
+
+// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names.
+const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]`
+
+// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters.
+var RestrictedNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`)
diff --git a/daemon/network.go b/daemon/network.go
index 62fe951..61548c5 100644
--- a/daemon/network.go
+++ b/daemon/network.go
@@ -3,7 +3,6 @@
 import (
 	"fmt"
 	"net"
-	"runtime"
 	"sort"
 	"strings"
 	"sync"
@@ -31,18 +30,27 @@
 
 // FindNetwork function finds a network for a given string that can represent network name or id
 func (daemon *Daemon) FindNetwork(idName string) (libnetwork.Network, error) {
-	// Find by Name
-	n, err := daemon.GetNetworkByName(idName)
-	if err != nil && !isNoSuchNetworkError(err) {
-		return nil, err
+	// 1. match by full ID.
+	n, err := daemon.GetNetworkByID(idName)
+	if err == nil || !isNoSuchNetworkError(err) {
+		return n, err
 	}
 
-	if n != nil {
-		return n, nil
+	// 2. match by full name
+	n, err = daemon.GetNetworkByName(idName)
+	if err == nil || !isNoSuchNetworkError(err) {
+		return n, err
 	}
 
-	// Find by id
-	return daemon.GetNetworkByID(idName)
+	// 3. match by ID prefix
+	list := daemon.GetNetworksByIDPrefix(idName)
+	if len(list) == 0 {
+		return nil, errors.WithStack(networkNotFound(idName))
+	}
+	if len(list) > 1 {
+		return nil, errors.WithStack(invalidIdentifier(idName))
+	}
+	return list[0], nil
 }
 
 func isNoSuchNetworkError(err error) bool {
@@ -50,18 +58,14 @@
 	return ok
 }
 
-// GetNetworkByID function returns a network whose ID begins with the given prefix.
-// It fails with an error if no matching, or more than one matching, networks are found.
-func (daemon *Daemon) GetNetworkByID(partialID string) (libnetwork.Network, error) {
-	list := daemon.GetNetworksByID(partialID)
-
-	if len(list) == 0 {
-		return nil, errors.WithStack(networkNotFound(partialID))
+// GetNetworkByID function returns a network whose ID matches the given ID.
+// It fails with an error if no matching network is found.
+func (daemon *Daemon) GetNetworkByID(id string) (libnetwork.Network, error) {
+	c := daemon.netController
+	if c == nil {
+		return nil, libnetwork.ErrNoSuchNetwork(id)
 	}
-	if len(list) > 1 {
-		return nil, errors.WithStack(invalidIdentifier(partialID))
-	}
-	return list[0], nil
+	return c.NetworkByID(id)
 }
 
 // GetNetworkByName function returns a network for a given network name.
@@ -77,8 +81,8 @@
 	return c.NetworkByName(name)
 }
 
-// GetNetworksByID returns a list of networks whose ID partially matches zero or more networks
-func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network {
+// GetNetworksByIDPrefix returns a list of networks whose ID partially matches zero or more networks
+func (daemon *Daemon) GetNetworksByIDPrefix(partialID string) []libnetwork.Network {
 	c := daemon.netController
 	if c == nil {
 		return nil
@@ -115,6 +119,7 @@
 func (daemon *Daemon) startIngressWorker() {
 	ingressJobsChannel = make(chan *ingressJob, 100)
 	go func() {
+		// nolint: gosimple
 		for {
 			select {
 			case r := <-ingressJobsChannel:
@@ -181,27 +186,8 @@
 		logrus.Errorf("Failed getting ingress network by id after creating: %v", err)
 	}
 
-	sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress())
-	if err != nil {
-		if _, ok := err.(networktypes.ForbiddenError); !ok {
-			logrus.Errorf("Failed creating ingress sandbox: %v", err)
-		}
-		return
-	}
-
-	ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil))
-	if err != nil {
-		logrus.Errorf("Failed creating ingress endpoint: %v", err)
-		return
-	}
-
-	if err := ep.Join(sb, nil); err != nil {
-		logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err)
-		return
-	}
-
-	if err := sb.EnableService(); err != nil {
-		logrus.Errorf("Failed enabling service for ingress sandbox")
+	if err = daemon.createLoadBalancerSandbox("ingress", create.ID, ip, n, libnetwork.OptionIngress()); err != nil {
+		logrus.Errorf("Failed creating load balancer sandbox for ingress network: %v", err)
 	}
 }
 
@@ -232,7 +218,6 @@
 		logrus.Errorf("Failed to delete ingress network %s: %v", n.ID(), err)
 		return
 	}
-	return
 }
 
 // SetNetworkBootstrapKeys sets the bootstrap keys.
@@ -283,6 +268,34 @@
 	return resp, err
 }
 
+func (daemon *Daemon) createLoadBalancerSandbox(prefix, id string, ip net.IP, n libnetwork.Network, options ...libnetwork.SandboxOption) error {
+	c := daemon.netController
+	sandboxName := prefix + "-sbox"
+	sb, err := c.NewSandbox(sandboxName, options...)
+	if err != nil {
+		if _, ok := err.(networktypes.ForbiddenError); !ok {
+			return errors.Wrapf(err, "Failed creating %s sandbox", sandboxName)
+		}
+		return nil
+	}
+
+	endpointName := prefix + "-endpoint"
+	ep, err := n.CreateEndpoint(endpointName, libnetwork.CreateOptionIpam(ip, nil, nil, nil), libnetwork.CreateOptionLoadBalancer())
+	if err != nil {
+		return errors.Wrapf(err, "Failed creating %s in sandbox %s", endpointName, sandboxName)
+	}
+
+	if err := ep.Join(sb, nil); err != nil {
+		return errors.Wrapf(err, "Failed joining %s to sandbox %s", endpointName, sandboxName)
+	}
+
+	if err := sb.EnableService(); err != nil {
+		return errors.Wrapf(err, "Failed enabling service in %s sandbox", sandboxName)
+	}
+
+	return nil
+}
+
 func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) {
 	if runconfig.IsPreDefinedNetwork(create.Name) && !agent {
 		err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name)
@@ -360,6 +373,18 @@
 	}
 	daemon.LogNetworkEvent(n, "create")
 
+	if agent && !n.Info().Ingress() && n.Type() == "overlay" {
+		nodeIP, exists := daemon.GetAttachmentStore().GetIPForNetwork(id)
+		if !exists {
+			return nil, fmt.Errorf("Failed to find a load balancer IP to use for network: %v", id)
+		}
+
+		if err := daemon.createLoadBalancerSandbox(create.Name, id, nodeIP, n); err != nil {
+			return nil, err
+		}
+
+	}
+
 	return &types.NetworkCreateResponse{
 		ID:      n.ID(),
 		Warning: warning,
@@ -426,9 +451,6 @@
 // network. If either cannot be found, an err is returned. If the
 // network cannot be set up, an err is returned.
 func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error {
-	if runtime.GOOS == "solaris" {
-		return errors.New("docker network connect is unsupported on Solaris platform")
-	}
 	container, err := daemon.GetContainer(containerName)
 	if err != nil {
 		return err
@@ -439,9 +461,6 @@
 // DisconnectContainerFromNetwork disconnects the given container from
 // the given network. If either cannot be found, an err is returned.
 func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, networkName string, force bool) error {
-	if runtime.GOOS == "solaris" {
-		return errors.New("docker network disconnect is unsupported on Solaris platform")
-	}
 	container, err := daemon.GetContainer(containerName)
 	if err != nil {
 		if force {
@@ -496,6 +515,37 @@
 	return daemon.deleteNetwork(networkID, false)
 }
 
+func (daemon *Daemon) deleteLoadBalancerSandbox(n libnetwork.Network) {
+	controller := daemon.netController
+
+	// The only endpoint left should be the LB endpoint (nw.Name() + "-endpoint")
+	endpoints := n.Endpoints()
+	if len(endpoints) == 1 {
+		sandboxName := n.Name() + "-sbox"
+
+		info := endpoints[0].Info()
+		if info != nil {
+			sb := info.Sandbox()
+			if sb != nil {
+				if err := sb.DisableService(); err != nil {
+					logrus.Warnf("Failed to disable service on sandbox %s: %v", sandboxName, err)
+					//Ignore error and attempt to delete the load balancer endpoint
+				}
+			}
+		}
+
+		if err := endpoints[0].Delete(true); err != nil {
+			logrus.Warnf("Failed to delete endpoint %s (%s) in %s: %v", endpoints[0].Name(), endpoints[0].ID(), sandboxName, err)
+			//Ignore error and attempt to delete the sandbox.
+		}
+
+		if err := controller.SandboxDestroy(sandboxName); err != nil {
+			logrus.Warnf("Failed to delete %s sandbox: %v", sandboxName, err)
+			//Ignore error and attempt to delete the network.
+		}
+	}
+}
+
 func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error {
 	nw, err := daemon.FindNetwork(networkID)
 	if err != nil {
@@ -517,6 +567,10 @@
 		return notAllowedError{err}
 	}
 
+	if !nw.Info().Ingress() && nw.Type() == "overlay" {
+		daemon.deleteLoadBalancerSandbox(nw)
+	}
+
 	if err := nw.Delete(); err != nil {
 		return err
 	}
diff --git a/daemon/network/settings.go b/daemon/network/settings.go
index 8f6b7dd..71343ff 100644
--- a/daemon/network/settings.go
+++ b/daemon/network/settings.go
@@ -1,9 +1,12 @@
 package network
 
 import (
+	"net"
+
 	networktypes "github.com/docker/docker/api/types/network"
 	clustertypes "github.com/docker/docker/daemon/cluster/provider"
 	"github.com/docker/go-connections/nat"
+	"github.com/pkg/errors"
 )
 
 // Settings stores configuration details about the daemon network config
@@ -31,3 +34,36 @@
 	*networktypes.EndpointSettings
 	IPAMOperational bool
 }
+
+// AttachmentStore stores the load balancer IP address for a network id.
+type AttachmentStore struct {
+	// key: network id
+	//value: load balancer ip address
+	networkToNodeLBIP map[string]net.IP
+}
+
+// ResetAttachments clears any existing load balancer IP to network mapping and
+// sets the mapping to the given attachments.
+func (store *AttachmentStore) ResetAttachments(attachments map[string]string) error {
+	store.ClearAttachments()
+	for nid, nodeIP := range attachments {
+		ip, _, err := net.ParseCIDR(nodeIP)
+		if err != nil {
+			store.networkToNodeLBIP = make(map[string]net.IP)
+			return errors.Wrapf(err, "Failed to parse load balancer address %s", nodeIP)
+		}
+		store.networkToNodeLBIP[nid] = ip
+	}
+	return nil
+}
+
+// ClearAttachments clears all the mappings of network to load balancer IP Address.
+func (store *AttachmentStore) ClearAttachments() {
+	store.networkToNodeLBIP = make(map[string]net.IP)
+}
+
+// GetIPForNetwork return the load balancer IP address for the given network.
+func (store *AttachmentStore) GetIPForNetwork(networkID string) (net.IP, bool) {
+	ip, exists := store.networkToNodeLBIP[networkID]
+	return ip, exists
+}
diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go
index 9cf6674..2c5d94d 100644
--- a/daemon/oci_linux.go
+++ b/daemon/oci_linux.go
@@ -19,7 +19,6 @@
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/stringutils"
-	"github.com/docker/docker/pkg/symlink"
 	"github.com/docker/docker/volume"
 	"github.com/opencontainers/runc/libcontainer/apparmor"
 	"github.com/opencontainers/runc/libcontainer/cgroups"
@@ -27,8 +26,10 @@
 	"github.com/opencontainers/runc/libcontainer/user"
 	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
 )
 
+// nolint: gosimple
 var (
 	deviceCgroupRuleRegex = regexp.MustCompile("^([acb]) ([0-9]+|\\*):([0-9]+|\\*) ([rwm]{1,3})$")
 )
@@ -155,7 +156,7 @@
 	return nil
 }
 
-func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error {
+func (daemon *Daemon) setRlimits(s *specs.Spec, c *container.Container) error {
 	var rlimits []specs.POSIXRlimit
 
 	// We want to leave the original HostConfig alone so make a copy here
@@ -186,7 +187,7 @@
 }
 
 func readUserFile(c *container.Container, p string) (io.ReadCloser, error) {
-	fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS)
+	fp, err := c.GetResourcePath(p)
 	if err != nil {
 		return nil, err
 	}
@@ -469,6 +470,38 @@
 	return nil
 }
 
+// Get the set of mount flags that are set on the mount that contains the given
+// path and are locked by CL_UNPRIVILEGED. This is necessary to ensure that
+// bind-mounting "with options" will not fail with user namespaces, due to
+// kernel restrictions that require user namespace mounts to preserve
+// CL_UNPRIVILEGED locked flags.
+func getUnprivilegedMountFlags(path string) ([]string, error) {
+	var statfs unix.Statfs_t
+	if err := unix.Statfs(path, &statfs); err != nil {
+		return nil, err
+	}
+
+	// The set of keys come from https://github.com/torvalds/linux/blob/v4.13/fs/namespace.c#L1034-L1048.
+	unprivilegedFlags := map[uint64]string{
+		unix.MS_RDONLY:     "ro",
+		unix.MS_NODEV:      "nodev",
+		unix.MS_NOEXEC:     "noexec",
+		unix.MS_NOSUID:     "nosuid",
+		unix.MS_NOATIME:    "noatime",
+		unix.MS_RELATIME:   "relatime",
+		unix.MS_NODIRATIME: "nodiratime",
+	}
+
+	var flags []string
+	for mask, flag := range unprivilegedFlags {
+		if uint64(statfs.Flags)&mask == mask {
+			flags = append(flags, flag)
+		}
+	}
+
+	return flags, nil
+}
+
 var (
 	mountPropagationMap = map[string]int{
 		"private":  mount.PRIVATE,
@@ -497,6 +530,7 @@
 
 	// Filter out mounts from spec
 	noIpc := c.HostConfig.IpcMode.IsNone()
+	// Filter out mounts that are overridden by user supplied mounts
 	var defaultMounts []specs.Mount
 	_, mountDev := userMounts["/dev"]
 	for _, m := range s.Mounts {
@@ -523,7 +557,8 @@
 
 		if m.Source == "tmpfs" {
 			data := m.Data
-			options := []string{"noexec", "nosuid", "nodev", string(volume.DefaultPropagationMode)}
+			parser := volume.NewParser("linux")
+			options := []string{"noexec", "nosuid", "nodev", string(parser.DefaultPropagationMode())}
 			if data != "" {
 				options = append(options, strings.Split(data, ",")...)
 			}
@@ -573,6 +608,19 @@
 			opts = append(opts, mountPropagationReverseMap[pFlag])
 		}
 
+		// If we are using user namespaces, then we must make sure that we
+		// don't drop any of the CL_UNPRIVILEGED "locked" flags of the source
+		// "mount" when we bind-mount. The reason for this is that at the point
+		// when runc sets up the root filesystem, it is already inside a user
+		// namespace, and thus cannot change any flags that are locked.
+		if daemon.configStore.RemappedRoot != "" {
+			unprivOpts, err := getUnprivilegedMountFlags(m.Source)
+			if err != nil {
+				return err
+			}
+			opts = append(opts, unprivOpts...)
+		}
+
 		mt.Options = opts
 		s.Mounts = append(s.Mounts, mt)
 	}
@@ -580,7 +628,7 @@
 	if s.Root.Readonly {
 		for i, m := range s.Mounts {
 			switch m.Destination {
-			case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc
+			case "/proc", "/dev/pts", "/dev/mqueue", "/dev":
 				continue
 			}
 			if _, ok := userMounts[m.Destination]; !ok {
@@ -631,7 +679,7 @@
 		return err
 	}
 	s.Root = &specs.Root{
-		Path:     c.BaseFS,
+		Path:     c.BaseFS.Path(),
 		Readonly: c.HostConfig.ReadonlyRootfs,
 	}
 	if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil {
@@ -715,11 +763,11 @@
 		if err != nil {
 			return nil, err
 		}
-		p, _ = cgroups.GetOwnCgroup("cpu")
+		_, err = cgroups.GetOwnCgroup("cpu")
 		if err != nil {
 			return nil, err
 		}
-		p = filepath.Join(initPath, p)
+		p = filepath.Join(initPath, s.Linux.CgroupsPath)
 	}
 
 	// Clean path to guard against things like ../../../BAD
@@ -734,7 +782,7 @@
 	if err := setDevices(&s, c); err != nil {
 		return nil, fmt.Errorf("linux runtime spec devices: %v", err)
 	}
-	if err := setRlimits(daemon, &s, c); err != nil {
+	if err := daemon.setRlimits(&s, c); err != nil {
 		return nil, fmt.Errorf("linux runtime spec rlimits: %v", err)
 	}
 	if err := setUser(&s, c); err != nil {
diff --git a/daemon/oci_solaris.go b/daemon/oci_solaris.go
deleted file mode 100644
index 610efe1..0000000
--- a/daemon/oci_solaris.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package daemon
-
-import (
-	"fmt"
-	"path/filepath"
-	"sort"
-	"strconv"
-
-	containertypes "github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/container"
-	"github.com/docker/docker/oci"
-	"github.com/docker/libnetwork"
-	"github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func setResources(s *specs.Spec, r containertypes.Resources) error {
-	mem := getMemoryResources(r)
-	s.Solaris.CappedMemory = &mem
-
-	capCPU := getCPUResources(r)
-	s.Solaris.CappedCPU = &capCPU
-
-	return nil
-}
-
-func setUser(s *specs.Spec, c *container.Container) error {
-	uid, gid, additionalGids, err := getUser(c, c.Config.User)
-	if err != nil {
-		return err
-	}
-	s.Process.User.UID = uid
-	s.Process.User.GID = gid
-	s.Process.User.AdditionalGids = additionalGids
-	return nil
-}
-
-func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) {
-	return 0, 0, nil, nil
-}
-
-func (daemon *Daemon) getRunzAnet(ep libnetwork.Endpoint) (specs.Anet, error) {
-	var (
-		linkName  string
-		lowerLink string
-		defRouter string
-	)
-
-	epInfo := ep.Info()
-	if epInfo == nil {
-		return specs.Anet{}, fmt.Errorf("invalid endpoint")
-	}
-
-	nw, err := daemon.GetNetworkByName(ep.Network())
-	if err != nil {
-		return specs.Anet{}, fmt.Errorf("Failed to get network %s: %v", ep.Network(), err)
-	}
-
-	// Evaluate default router, linkname and lowerlink for interface endpoint
-	switch nw.Type() {
-	case "bridge":
-		defRouter = epInfo.Gateway().String()
-		linkName = "net0" // Should always be net0 for a container
-
-		// TODO We construct lowerlink here exactly as done for solaris bridge
-		// initialization. Need modular code to reuse.
-		options := nw.Info().DriverOptions()
-		nwName := options["com.docker.network.bridge.name"]
-		lastChar := nwName[len(nwName)-1:]
-		if _, err = strconv.Atoi(lastChar); err != nil {
-			lowerLink = nwName + "_0"
-		} else {
-			lowerLink = nwName
-		}
-
-	case "overlay":
-		defRouter = ""
-		linkName = "net1"
-
-		// TODO Follows generateVxlanName() in solaris overlay.
-		id := nw.ID()
-		if len(nw.ID()) > 12 {
-			id = nw.ID()[:12]
-		}
-		lowerLink = "vx_" + id + "_0"
-	}
-
-	runzanet := specs.Anet{
-		Linkname:          linkName,
-		Lowerlink:         lowerLink,
-		Allowedaddr:       epInfo.Iface().Address().String(),
-		Configallowedaddr: "true",
-		Defrouter:         defRouter,
-		Linkprotection:    "mac-nospoof, ip-nospoof",
-		Macaddress:        epInfo.Iface().MacAddress().String(),
-	}
-
-	return runzanet, nil
-}
-
-func (daemon *Daemon) setNetworkInterface(s *specs.Spec, c *container.Container) error {
-	var anets []specs.Anet
-
-	sb, err := daemon.netController.SandboxByID(c.NetworkSettings.SandboxID)
-	if err != nil {
-		return fmt.Errorf("Could not obtain sandbox for container")
-	}
-
-	// Populate interfaces required for each endpoint
-	for _, ep := range sb.Endpoints() {
-		runzanet, err := daemon.getRunzAnet(ep)
-		if err != nil {
-			return fmt.Errorf("Failed to get interface information for endpoint %d: %v", ep.ID(), err)
-		}
-		anets = append(anets, runzanet)
-	}
-
-	s.Solaris.Anet = anets
-	if anets != nil {
-		s.Solaris.Milestone = "svc:/milestone/container:default"
-	}
-	return nil
-}
-
-func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error {
-	linkedEnv, err := daemon.setupLinkedContainers(c)
-	if err != nil {
-		return err
-	}
-	s.Root = specs.Root{
-		Path:     filepath.Dir(c.BaseFS),
-		Readonly: c.HostConfig.ReadonlyRootfs,
-	}
-	if err := c.SetupWorkingDirectory(daemon.idMappings.RootPair()); err != nil {
-		return err
-	}
-	cwd := c.Config.WorkingDir
-	s.Process.Args = append([]string{c.Path}, c.Args...)
-	s.Process.Cwd = cwd
-	s.Process.Env = c.CreateDaemonEnvironment(c.Config.Tty, linkedEnv)
-	s.Process.Terminal = c.Config.Tty
-	s.Hostname = c.FullHostname()
-
-	return nil
-}
-
-func (daemon *Daemon) createSpec(c *container.Container) (*specs.Spec, error) {
-	s := oci.DefaultSpec()
-	if err := daemon.populateCommonSpec(&s, c); err != nil {
-		return nil, err
-	}
-
-	if err := setResources(&s, c.HostConfig.Resources); err != nil {
-		return nil, fmt.Errorf("runtime spec resources: %v", err)
-	}
-
-	if err := setUser(&s, c); err != nil {
-		return nil, fmt.Errorf("spec user: %v", err)
-	}
-
-	if err := daemon.setNetworkInterface(&s, c); err != nil {
-		return nil, err
-	}
-
-	if err := daemon.setupIpcDirs(c); err != nil {
-		return nil, err
-	}
-
-	ms, err := daemon.setupMounts(c)
-	if err != nil {
-		return nil, err
-	}
-	ms = append(ms, c.IpcMounts()...)
-	tmpfsMounts, err := c.TmpfsMounts()
-	if err != nil {
-		return nil, err
-	}
-	ms = append(ms, tmpfsMounts...)
-	sort.Sort(mounts(ms))
-
-	return (*specs.Spec)(&s), nil
-}
-
-// mergeUlimits merge the Ulimits from HostConfig with daemon defaults, and update HostConfig
-// It will do nothing on non-Linux platform
-func (daemon *Daemon) mergeUlimits(c *containertypes.HostConfig) {
-	return
-}
diff --git a/daemon/oci_windows.go b/daemon/oci_windows.go
index 0254351..c7c94f3 100644
--- a/daemon/oci_windows.go
+++ b/daemon/oci_windows.go
@@ -4,6 +4,7 @@
 	"fmt"
 	"io/ioutil"
 	"path/filepath"
+	"runtime"
 	"strings"
 
 	containertypes "github.com/docker/docker/api/types/container"
@@ -108,6 +109,11 @@
 		if !mount.Writable {
 			m.Options = append(m.Options, "ro")
 		}
+		if img.OS != runtime.GOOS {
+			m.Type = "bind"
+			m.Options = append(m.Options, "rbind")
+			m.Options = append(m.Options, fmt.Sprintf("uvmpath=/tmp/gcs/%s/binds", c.ID))
+		}
 		s.Mounts = append(s.Mounts, m)
 	}
 
@@ -132,9 +138,9 @@
 	max := len(img.RootFS.DiffIDs)
 	for i := 1; i <= max; i++ {
 		img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i]
-		layerPath, err := layer.GetLayerPath(daemon.stores[c.Platform].layerStore, img.RootFS.ChainID())
+		layerPath, err := layer.GetLayerPath(daemon.stores[c.OS].layerStore, img.RootFS.ChainID())
 		if err != nil {
-			return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.stores[c.Platform].layerStore, img.RootFS.ChainID(), err)
+			return nil, fmt.Errorf("failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.stores[c.OS].layerStore, img.RootFS.ChainID(), err)
 		}
 		// Reverse order, expecting parent most first
 		s.Windows.LayerFolders = append([]string{layerPath}, s.Windows.LayerFolders...)
@@ -233,7 +239,7 @@
 
 	s.Root.Readonly = false // Windows does not support a read-only root filesystem
 	if !isHyperV {
-		s.Root.Path = c.BaseFS // This is not set for Hyper-V containers
+		s.Root.Path = c.BaseFS.Path() // This is not set for Hyper-V containers
 		if !strings.HasSuffix(s.Root.Path, `\`) {
 			s.Root.Path = s.Root.Path + `\` // Ensure a correctly formatted volume GUID path \\?\Volume{GUID}\
 		}
diff --git a/daemon/pause.go b/daemon/pause.go
index 3fecea5..b751cc4 100644
--- a/daemon/pause.go
+++ b/daemon/pause.go
@@ -1,9 +1,11 @@
 package daemon
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/docker/docker/container"
+	"github.com/sirupsen/logrus"
 )
 
 // ContainerPause pauses a container
@@ -33,7 +35,7 @@
 
 	// We cannot Pause the container which is already paused
 	if container.Paused {
-		return fmt.Errorf("Container %s is already paused", container.ID)
+		return errNotPaused(container.ID)
 	}
 
 	// We cannot Pause the container which is restarting
@@ -41,9 +43,18 @@
 		return errContainerIsRestarting(container.ID)
 	}
 
-	if err := daemon.containerd.Pause(container.ID); err != nil {
+	if err := daemon.containerd.Pause(context.Background(), container.ID); err != nil {
 		return fmt.Errorf("Cannot pause container %s: %s", container.ID, err)
 	}
 
+	container.Paused = true
+	daemon.setStateCounter(container)
+	daemon.updateHealthMonitor(container)
+	daemon.LogContainerEvent(container, "pause")
+
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+		logrus.WithError(err).Warn("could not save container to disk")
+	}
+
 	return nil
 }
diff --git a/daemon/prune.go b/daemon/prune.go
index 66eca2b..1850752 100644
--- a/daemon/prune.go
+++ b/daemon/prune.go
@@ -140,7 +140,7 @@
 			if err != nil {
 				logrus.Warnf("could not determine size of volume %s: %v", name, err)
 			}
-			err = daemon.volumes.Remove(v)
+			err = daemon.volumeRm(v)
 			if err != nil {
 				logrus.Warnf("could not remove volume %s: %v", name, err)
 				return nil
@@ -182,7 +182,7 @@
 	rep := &types.ImagesPruneReport{}
 
 	danglingOnly := true
-	if pruneFilters.Include("dangling") {
+	if pruneFilters.Contains("dangling") {
 		if pruneFilters.ExactMatch("dangling", "false") || pruneFilters.ExactMatch("dangling", "0") {
 			danglingOnly = false
 		} else if !pruneFilters.ExactMatch("dangling", "true") && !pruneFilters.ExactMatch("dangling", "1") {
@@ -440,7 +440,7 @@
 
 func getUntilFromPruneFilters(pruneFilters filters.Args) (time.Time, error) {
 	until := time.Time{}
-	if !pruneFilters.Include("until") {
+	if !pruneFilters.Contains("until") {
 		return until, nil
 	}
 	untilFilters := pruneFilters.Get("until")
@@ -464,8 +464,8 @@
 		return false
 	}
 	// By default MatchKVList will return true if field (like 'label!') does not exist
-	// So we have to add additional Include("label!") check
-	if pruneFilters.Include("label!") {
+	// So we have to add an additional Contains("label!") check
+	if pruneFilters.Contains("label!") {
 		if pruneFilters.MatchKVList("label!", labels) {
 			return false
 		}
diff --git a/daemon/reload.go b/daemon/reload.go
index a6674ec..0d16bc8 100644
--- a/daemon/reload.go
+++ b/daemon/reload.go
@@ -6,7 +6,6 @@
 
 	"github.com/docker/docker/daemon/config"
 	"github.com/docker/docker/daemon/discovery"
-	"github.com/docker/docker/libcontainerd"
 	"github.com/sirupsen/logrus"
 )
 
@@ -303,9 +302,6 @@
 	// update corresponding configuration
 	if conf.IsValueSet("live-restore") {
 		daemon.configStore.LiveRestoreEnabled = conf.LiveRestoreEnabled
-		if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(conf.LiveRestoreEnabled)); err != nil {
-			return err
-		}
 	}
 
 	// prepare reload event attributes with updatable configurations
diff --git a/daemon/reload_test.go b/daemon/reload_test.go
index bf11b6b..96b1a24 100644
--- a/daemon/reload_test.go
+++ b/daemon/reload_test.go
@@ -1,5 +1,3 @@
-// +build !solaris
-
 package daemon
 
 import (
@@ -12,6 +10,7 @@
 	"github.com/docker/docker/pkg/discovery"
 	_ "github.com/docker/docker/pkg/discovery/memory"
 	"github.com/docker/docker/registry"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestDaemonReloadLabels(t *testing.T) {
@@ -46,8 +45,9 @@
 		configStore: &config.Config{},
 	}
 
+	var err error
 	// Initialize daemon with some registries.
-	daemon.RegistryService = registry.NewService(registry.ServiceOptions{
+	daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{
 		AllowNondistributableArtifacts: []string{
 			"127.0.0.0/8",
 			"10.10.1.11:5000",
@@ -56,6 +56,9 @@
 			"docker2.com", // This will be removed during reload.
 		},
 	})
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	registries := []string{
 		"127.0.0.0/8",
@@ -85,20 +88,17 @@
 	for _, value := range serviceConfig.AllowNondistributableArtifactsCIDRs {
 		actual = append(actual, value.String())
 	}
-	for _, value := range serviceConfig.AllowNondistributableArtifactsHostnames {
-		actual = append(actual, value)
-	}
+	actual = append(actual, serviceConfig.AllowNondistributableArtifactsHostnames...)
 
 	sort.Strings(registries)
 	sort.Strings(actual)
-	if !reflect.DeepEqual(registries, actual) {
-		t.Fatalf("expected %v, got %v\n", registries, actual)
-	}
+	assert.Equal(t, registries, actual)
 }
 
 func TestDaemonReloadMirrors(t *testing.T) {
 	daemon := &Daemon{}
-	daemon.RegistryService = registry.NewService(registry.ServiceOptions{
+	var err error
+	daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{
 		InsecureRegistries: []string{},
 		Mirrors: []string{
 			"https://mirror.test1.com",
@@ -106,6 +106,9 @@
 			"https://mirror.test3.com", // this will be removed when reloading
 		},
 	})
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	daemon.configStore = &config.Config{}
 
@@ -191,8 +194,9 @@
 
 func TestDaemonReloadInsecureRegistries(t *testing.T) {
 	daemon := &Daemon{}
+	var err error
 	// initialize daemon with existing insecure registries: "127.0.0.0/8", "10.10.1.11:5000", "10.10.1.22:5000"
-	daemon.RegistryService = registry.NewService(registry.ServiceOptions{
+	daemon.RegistryService, err = registry.NewService(registry.ServiceOptions{
 		InsecureRegistries: []string{
 			"127.0.0.0/8",
 			"10.10.1.11:5000",
@@ -201,6 +205,9 @@
 			"docker2.com", // this will be removed when reloading
 		},
 	})
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	daemon.configStore = &config.Config{}
 
diff --git a/daemon/resize.go b/daemon/resize.go
index 0923d0f..a992a07 100644
--- a/daemon/resize.go
+++ b/daemon/resize.go
@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/docker/docker/libcontainerd"
@@ -18,7 +19,7 @@
 		return errNotRunning(container.ID)
 	}
 
-	if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil {
+	if err = daemon.containerd.ResizeTerminal(context.Background(), container.ID, libcontainerd.InitProcessName, width, height); err == nil {
 		attributes := map[string]string{
 			"height": fmt.Sprintf("%d", height),
 			"width":  fmt.Sprintf("%d", width),
@@ -36,5 +37,5 @@
 	if err != nil {
 		return err
 	}
-	return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height)
+	return daemon.containerd.ResizeTerminal(context.Background(), ec.ContainerID, ec.ID, width, height)
 }
diff --git a/daemon/search.go b/daemon/search.go
index 540e247..25744cb 100644
--- a/daemon/search.go
+++ b/daemon/search.go
@@ -23,7 +23,7 @@
 	authConfig *types.AuthConfig,
 	headers map[string][]string) (*registrytypes.SearchResults, error) {
 
-	searchFilters, err := filters.FromParam(filtersArgs)
+	searchFilters, err := filters.FromJSON(filtersArgs)
 	if err != nil {
 		return nil, err
 	}
@@ -33,21 +33,21 @@
 
 	var isAutomated, isOfficial bool
 	var hasStarFilter = 0
-	if searchFilters.Include("is-automated") {
+	if searchFilters.Contains("is-automated") {
 		if searchFilters.UniqueExactMatch("is-automated", "true") {
 			isAutomated = true
 		} else if !searchFilters.UniqueExactMatch("is-automated", "false") {
 			return nil, invalidFilter{"is-automated", searchFilters.Get("is-automated")}
 		}
 	}
-	if searchFilters.Include("is-official") {
+	if searchFilters.Contains("is-official") {
 		if searchFilters.UniqueExactMatch("is-official", "true") {
 			isOfficial = true
 		} else if !searchFilters.UniqueExactMatch("is-official", "false") {
 			return nil, invalidFilter{"is-official", searchFilters.Get("is-official")}
 		}
 	}
-	if searchFilters.Include("stars") {
+	if searchFilters.Contains("stars") {
 		hasStars := searchFilters.Get("stars")
 		for _, hasStar := range hasStars {
 			iHasStar, err := strconv.Atoi(hasStar)
@@ -67,17 +67,17 @@
 
 	filteredResults := []registrytypes.SearchResult{}
 	for _, result := range unfilteredResult.Results {
-		if searchFilters.Include("is-automated") {
+		if searchFilters.Contains("is-automated") {
 			if isAutomated != result.IsAutomated {
 				continue
 			}
 		}
-		if searchFilters.Include("is-official") {
+		if searchFilters.Contains("is-official") {
 			if isOfficial != result.IsOfficial {
 				continue
 			}
 		}
-		if searchFilters.Include("stars") {
+		if searchFilters.Contains("stars") {
 			if result.StarCount < hasStarFilter {
 				continue
 			}
diff --git a/daemon/start.go b/daemon/start.go
index 55438cf..3b9f0f9 100644
--- a/daemon/start.go
+++ b/daemon/start.go
@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"context"
 	"runtime"
 	"time"
 
@@ -79,7 +80,7 @@
 
 	// check if hostConfig is in line with the current system settings.
 	// It may happen cgroups are umounted or the like.
-	if _, err = daemon.verifyContainerSettings(container.Platform, container.HostConfig, nil, false); err != nil {
+	if _, err = daemon.verifyContainerSettings(container.OS, container.HostConfig, nil, false); err != nil {
 		return validationError{err}
 	}
 	// Adapt for old containers in case we have updates in this function and
@@ -113,6 +114,11 @@
 		return stateConflictError{errors.New("container is marked for removal and cannot be started")}
 	}
 
+	if checkpointDir != "" {
+		// TODO(mlaventure): how would we support that?
+		return notAllowedError{errors.New("custom checkpointdir is not supported")}
+	}
+
 	// if we encounter an error during start we need to ensure that any other
 	// setup has been cleaned up properly
 	defer func() {
@@ -152,28 +158,56 @@
 		return systemError{err}
 	}
 
-	createOptions, err := daemon.getLibcontainerdCreateOptions(container)
-	if err != nil {
-		return err
-	}
-
 	if resetRestartManager {
 		container.ResetRestartManager(true)
 	}
 
-	if checkpointDir == "" {
-		checkpointDir = container.CheckpointDir()
-	}
-
 	if daemon.saveApparmorConfig(container); err != nil {
 		return err
 	}
 
-	if err := daemon.containerd.Create(container.ID, checkpoint, checkpointDir, *spec, container.InitializeStdio, createOptions...); err != nil {
-		return translateContainerdStartErr(container.Path, container.SetExitCode, err)
-
+	if checkpoint != "" {
+		checkpointDir, err = getCheckpointDir(checkpointDir, checkpoint, container.Name, container.ID, container.CheckpointDir(), false)
+		if err != nil {
+			return err
+		}
 	}
 
+	createOptions, err := daemon.getLibcontainerdCreateOptions(container)
+	if err != nil {
+		return err
+	}
+
+	err = daemon.containerd.Create(context.Background(), container.ID, spec, createOptions)
+	if err != nil {
+		return translateContainerdStartErr(container.Path, container.SetExitCode, err)
+	}
+
+	// TODO(mlaventure): we need to specify checkpoint options here
+	pid, err := daemon.containerd.Start(context.Background(), container.ID, checkpointDir,
+		container.StreamConfig.Stdin() != nil || container.Config.Tty,
+		container.InitializeStdio)
+	if err != nil {
+		if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil {
+			logrus.WithError(err).WithField("container", container.ID).
+				Error("failed to delete failed start container")
+		}
+		return translateContainerdStartErr(container.Path, container.SetExitCode, err)
+	}
+
+	container.SetRunning(pid, true)
+	container.HasBeenManuallyStopped = false
+	container.HasBeenStartedBefore = true
+	daemon.setStateCounter(container)
+
+	daemon.initHealthMonitor(container)
+
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+		logrus.WithError(err).WithField("container", container.ID).
+			Errorf("failed to store container")
+	}
+
+	daemon.LogContainerEvent(container, "start")
 	containerActions.WithValues("start").UpdateSince(start)
 
 	return nil
@@ -191,7 +225,7 @@
 	if err := daemon.conditionalUnmountOnCleanup(container); err != nil {
 		// FIXME: remove once reference counting for graphdrivers has been refactored
 		// Ensure that all the mounts are gone
-		if mountid, err := daemon.stores[container.Platform].layerStore.GetMountID(container.ID); err == nil {
+		if mountid, err := daemon.stores[container.OS].layerStore.GetMountID(container.ID); err == nil {
 			daemon.cleanupMountsByID(mountid)
 		}
 	}
@@ -204,10 +238,15 @@
 		daemon.unregisterExecCommand(container, eConfig)
 	}
 
-	if container.BaseFS != "" {
+	if container.BaseFS != nil && container.BaseFS.Path() != "" {
 		if err := container.UnmountVolumes(daemon.LogVolumeEvent); err != nil {
 			logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err)
 		}
 	}
+
 	container.CancelAttachContext()
+
+	if err := daemon.containerd.Delete(context.Background(), container.ID); err != nil {
+		logrus.Errorf("%s cleanup: failed to delete container from containerd: %v", container.ID, err)
+	}
 }
diff --git a/daemon/start_unix.go b/daemon/start_unix.go
index 87ab085..a8402bb 100644
--- a/daemon/start_unix.go
+++ b/daemon/start_unix.go
@@ -3,29 +3,54 @@
 package daemon
 
 import (
+	"fmt"
+	"os/exec"
+	"path/filepath"
+
+	"github.com/containerd/containerd/linux/runcopts"
 	"github.com/docker/docker/container"
-	"github.com/docker/docker/libcontainerd"
 	"github.com/pkg/errors"
 )
 
-// getLibcontainerdCreateOptions callers must hold a lock on the container
-func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) {
-	createOptions := []libcontainerd.CreateOption{}
+func (daemon *Daemon) getRuntimeScript(container *container.Container) (string, error) {
+	name := container.HostConfig.Runtime
+	rt := daemon.configStore.GetRuntime(name)
+	if rt == nil {
+		return "", validationError{errors.Errorf("no such runtime '%s'", name)}
+	}
 
+	if len(rt.Args) > 0 {
+		// First check that the target exists, as using it in a script won't
+		// give us the right error
+		if _, err := exec.LookPath(rt.Path); err != nil {
+			return "", translateContainerdStartErr(container.Path, container.SetExitCode, err)
+		}
+		return filepath.Join(daemon.configStore.Root, "runtimes", name), nil
+	}
+	return rt.Path, nil
+}
+
+// getLibcontainerdCreateOptions callers must hold a lock on the container
+func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (interface{}, error) {
 	// Ensure a runtime has been assigned to this container
 	if container.HostConfig.Runtime == "" {
 		container.HostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName()
 		container.CheckpointTo(daemon.containersReplica)
 	}
 
-	rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime)
-	if rt == nil {
-		return nil, validationError{errors.Errorf("no such runtime '%s'", container.HostConfig.Runtime)}
+	path, err := daemon.getRuntimeScript(container)
+	if err != nil {
+		return nil, err
 	}
-	if UsingSystemd(daemon.configStore) {
-		rt.Args = append(rt.Args, "--systemd-cgroup=true")
+	opts := &runcopts.RuncOptions{
+		Runtime: path,
+		RuntimeRoot: filepath.Join(daemon.configStore.ExecRoot,
+			fmt.Sprintf("runtime-%s", container.HostConfig.Runtime)),
 	}
-	createOptions = append(createOptions, libcontainerd.WithRuntime(rt.Path, rt.Args))
 
-	return createOptions, nil
+	if UsingSystemd(daemon.configStore) {
+		opts.SystemdCgroup = true
+	}
+
+	return opts, nil
 }
diff --git a/daemon/start_windows.go b/daemon/start_windows.go
index 9082a93..55588be 100644
--- a/daemon/start_windows.go
+++ b/daemon/start_windows.go
@@ -3,14 +3,11 @@
 import (
 	"github.com/Microsoft/opengcs/client"
 	"github.com/docker/docker/container"
-	"github.com/docker/docker/libcontainerd"
 )
 
-func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) ([]libcontainerd.CreateOption, error) {
-	createOptions := []libcontainerd.CreateOption{}
-
+func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (interface{}, error) {
 	// LCOW options.
-	if container.Platform == "linux" {
+	if container.OS == "linux" {
 		config := &client.Config{}
 		if err := config.GenerateDefault(daemon.configStore.GraphOptions); err != nil {
 			return nil, err
@@ -33,11 +30,9 @@
 		if err := config.Validate(); err != nil {
 			return nil, err
 		}
-		lcowOpts := &libcontainerd.LCOWOption{
-			Config: config,
-		}
-		createOptions = append(createOptions, lcowOpts)
+
+		return config, nil
 	}
 
-	return createOptions, nil
+	return nil, nil
 }
diff --git a/daemon/stats.go b/daemon/stats.go
index 926f32e..ec77ac0 100644
--- a/daemon/stats.go
+++ b/daemon/stats.go
@@ -3,7 +3,6 @@
 import (
 	"encoding/json"
 	"errors"
-	"fmt"
 	"runtime"
 	"time"
 
@@ -20,9 +19,6 @@
 // ContainerStats writes information about the container to the stream
 // given in the config object.
 func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *backend.ContainerStatsConfig) error {
-	if runtime.GOOS == "solaris" {
-		return fmt.Errorf("%+v does not support stats", runtime.GOOS)
-	}
 	// Engine API version (used for backwards compatibility)
 	apiVersion := config.Version
 
diff --git a/daemon/stats/collector.go b/daemon/stats/collector.go
index c930bc7..f13a804 100644
--- a/daemon/stats/collector.go
+++ b/daemon/stats/collector.go
@@ -1,5 +1,3 @@
-// +build !solaris
-
 package stats
 
 import (
diff --git a/daemon/stats/collector_solaris.go b/daemon/stats/collector_solaris.go
deleted file mode 100644
index 3699d08..0000000
--- a/daemon/stats/collector_solaris.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package stats
-
-import (
-	"github.com/docker/docker/container"
-)
-
-// platformNewStatsCollector performs platform specific initialisation of the
-// Collector structure. This is a no-op on Windows.
-func platformNewStatsCollector(s *Collector) {
-}
-
-// Collect registers the container with the collector and adds it to
-// the event loop for collection on the specified interval returning
-// a channel for the subscriber to receive on.
-// Currently not supported on Solaris
-func (s *Collector) Collect(c *container.Container) chan interface{} {
-	return nil
-}
-
-// StopCollection closes the channels for all subscribers and removes
-// the container from metrics collection.
-// Currently not supported on Solaris
-func (s *Collector) StopCollection(c *container.Container) {
-}
-
-// Unsubscribe removes a specific subscriber from receiving updates for a container's stats.
-// Currently not supported on Solaris
-func (s *Collector) Unsubscribe(c *container.Container, ch chan interface{}) {
-}
diff --git a/daemon/stats/collector_unix.go b/daemon/stats/collector_unix.go
index cd522e0..6b1318a 100644
--- a/daemon/stats/collector_unix.go
+++ b/daemon/stats/collector_unix.go
@@ -1,4 +1,4 @@
-// +build !windows,!solaris
+// +build !windows
 
 package stats
 
diff --git a/daemon/stop.go b/daemon/stop.go
index c43fbfa..7eadba7 100644
--- a/daemon/stop.go
+++ b/daemon/stop.go
@@ -78,7 +78,7 @@
 		// 3. If it doesn't, then send SIGKILL
 		if err := daemon.Kill(container); err != nil {
 			// Wait without a timeout, ignore result.
-			_ = <-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning)
+			<-container.Wait(context.Background(), containerpkg.WaitConditionNotRunning)
 			logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it
 		}
 	}
diff --git a/api/fixtures/keyfile b/daemon/testdata/keyfile
similarity index 100%
rename from api/fixtures/keyfile
rename to daemon/testdata/keyfile
diff --git a/daemon/top_unix.go b/daemon/top_unix.go
index ec2b1da..cbb993f 100644
--- a/daemon/top_unix.go
+++ b/daemon/top_unix.go
@@ -3,6 +3,7 @@
 package daemon
 
 import (
+	"context"
 	"fmt"
 	"os/exec"
 	"regexp"
@@ -16,6 +17,7 @@
 	// NOTE: \\s does not detect unicode whitespaces.
 	// So we use fieldsASCII instead of strings.Fields in parsePSOutput.
 	// See https://github.com/docker/docker/pull/24358
+	// nolint: gosimple
 	re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)")
 	for _, group := range re.FindAllStringSubmatch(psArgs, -1) {
 		if len(group) >= 3 {
@@ -49,16 +51,16 @@
 	procList.Processes = append(procList.Processes, process)
 }
 
-func hasPid(pids []int, pid int) bool {
-	for _, i := range pids {
-		if i == pid {
+func hasPid(procs []uint32, pid int) bool {
+	for _, p := range procs {
+		if int(p) == pid {
 			return true
 		}
 	}
 	return false
 }
 
-func parsePSOutput(output []byte, pids []int) (*container.ContainerTopOKBody, error) {
+func parsePSOutput(output []byte, procs []uint32) (*container.ContainerTopOKBody, error) {
 	procList := &container.ContainerTopOKBody{}
 
 	lines := strings.Split(string(output), "\n")
@@ -100,7 +102,7 @@
 			return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err)
 		}
 
-		if hasPid(pids, p) {
+		if hasPid(procs, p) {
 			preContainedPidFlag = true
 			appendProcess2ProcList(procList, fields)
 			continue
@@ -137,7 +139,7 @@
 		return nil, errContainerIsRestarting(container.ID)
 	}
 
-	pids, err := daemon.containerd.GetPidsForContainer(container.ID)
+	procs, err := daemon.containerd.ListPids(context.Background(), container.ID)
 	if err != nil {
 		return nil, err
 	}
@@ -146,7 +148,7 @@
 	if err != nil {
 		return nil, fmt.Errorf("Error running ps: %v", err)
 	}
-	procList, err := parsePSOutput(output, pids)
+	procList, err := parsePSOutput(output, procs)
 	if err != nil {
 		return nil, err
 	}
diff --git a/daemon/top_unix_test.go b/daemon/top_unix_test.go
index 9a3749f..4cc4a20 100644
--- a/daemon/top_unix_test.go
+++ b/daemon/top_unix_test.go
@@ -36,7 +36,7 @@
 func TestContainerTopParsePSOutput(t *testing.T) {
 	tests := []struct {
 		output      []byte
-		pids        []int
+		pids        []uint32
 		errExpected bool
 	}{
 		{[]byte(`  PID COMMAND
@@ -44,26 +44,26 @@
    43 bar
 		- -
   100 baz
-`), []int{42, 43}, false},
+`), []uint32{42, 43}, false},
 		{[]byte(`  UID COMMAND
    42 foo
    43 bar
 		- -
   100 baz
-`), []int{42, 43}, true},
+`), []uint32{42, 43}, true},
 		// unicode space (U+2003, 0xe2 0x80 0x83)
 		{[]byte(` PID COMMAND
    42 foo
    43 bar
 		- -
   100 baz
-`), []int{42, 43}, true},
+`), []uint32{42, 43}, true},
 		// the first space is U+2003, the second one is ascii.
 		{[]byte(` PID COMMAND
    42 foo
    43 bar
   100 baz
-`), []int{42, 43}, true},
+`), []uint32{42, 43}, true},
 	}
 
 	for _, f := range tests {
diff --git a/daemon/top_windows.go b/daemon/top_windows.go
index 000720b..40828ff 100644
--- a/daemon/top_windows.go
+++ b/daemon/top_windows.go
@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"time"
@@ -34,7 +35,15 @@
 		return nil, err
 	}
 
-	s, err := daemon.containerd.Summary(container.ID)
+	if !container.IsRunning() {
+		return nil, errNotRunning(container.ID)
+	}
+
+	if container.IsRestarting() {
+		return nil, errContainerIsRestarting(container.ID)
+	}
+
+	s, err := daemon.containerd.Summary(context.Background(), container.ID)
 	if err != nil {
 		return nil, err
 	}
@@ -49,5 +58,6 @@
 			fmt.Sprintf("%02d:%02d:%02d.%03d", int(d.Hours()), int(d.Minutes())%60, int(d.Seconds())%60, int(d.Nanoseconds()/1000000)%1000),
 			units.HumanSize(float64(j.MemoryWorkingSetPrivateBytes))})
 	}
+
 	return procList, nil
 }
diff --git a/daemon/trustkey.go b/daemon/trustkey.go
new file mode 100644
index 0000000..cb33146
--- /dev/null
+++ b/daemon/trustkey.go
@@ -0,0 +1,57 @@
+package daemon
+
+import (
+	"encoding/json"
+	"encoding/pem"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/system"
+	"github.com/docker/libtrust"
+)
+
+// LoadOrCreateTrustKey attempts to load the libtrust key at the given path,
+// otherwise generates a new one
+// TODO: this should use more of libtrust.LoadOrCreateTrustKey which may need
+// a refactor or this function to be moved into libtrust
+func loadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) {
+	err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700, "")
+	if err != nil {
+		return nil, err
+	}
+	trustKey, err := libtrust.LoadKeyFile(trustKeyPath)
+	if err == libtrust.ErrKeyFileDoesNotExist {
+		trustKey, err = libtrust.GenerateECP256PrivateKey()
+		if err != nil {
+			return nil, fmt.Errorf("Error generating key: %s", err)
+		}
+		encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath))
+		if err != nil {
+			return nil, fmt.Errorf("Error serializing key: %s", err)
+		}
+		if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil {
+			return nil, fmt.Errorf("Error saving key file: %s", err)
+		}
+	} else if err != nil {
+		return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err)
+	}
+	return trustKey, nil
+}
+
+func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) {
+	if ext == ".json" || ext == ".jwk" {
+		encoded, err = json.Marshal(key)
+		if err != nil {
+			return nil, fmt.Errorf("unable to encode private key JWK: %s", err)
+		}
+	} else {
+		pemBlock, err := key.PEMBlock()
+		if err != nil {
+			return nil, fmt.Errorf("unable to encode private key PEM: %s", err)
+		}
+		encoded = pem.EncodeToMemory(pemBlock)
+	}
+	return
+}
diff --git a/daemon/trustkey_test.go b/daemon/trustkey_test.go
new file mode 100644
index 0000000..2ade2aa
--- /dev/null
+++ b/daemon/trustkey_test.go
@@ -0,0 +1,72 @@
+package daemon
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/docker/docker/internal/testutil"
+	"github.com/gotestyourself/gotestyourself/fs"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+// LoadOrCreateTrustKey
+func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) {
+	tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test")
+	require.NoError(t, err)
+	defer os.RemoveAll(tmpKeyFolderPath)
+
+	tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile")
+	require.NoError(t, err)
+
+	_, err = loadOrCreateTrustKey(tmpKeyFile.Name())
+	testutil.ErrorContains(t, err, "Error loading key file")
+}
+
+func TestLoadOrCreateTrustKeyCreateKeyWhenFileDoesNotExist(t *testing.T) {
+	tmpKeyFolderPath := fs.NewDir(t, "api-trustkey-test")
+	defer tmpKeyFolderPath.Remove()
+
+	// Without the need to create the folder hierarchy
+	tmpKeyFile := tmpKeyFolderPath.Join("keyfile")
+
+	key, err := loadOrCreateTrustKey(tmpKeyFile)
+	require.NoError(t, err)
+	assert.NotNil(t, key)
+
+	_, err = os.Stat(tmpKeyFile)
+	require.NoError(t, err, "key file doesn't exist")
+}
+
+func TestLoadOrCreateTrustKeyCreateKeyWhenDirectoryDoesNotExist(t *testing.T) {
+	tmpKeyFolderPath := fs.NewDir(t, "api-trustkey-test")
+	defer tmpKeyFolderPath.Remove()
+	tmpKeyFile := tmpKeyFolderPath.Join("folder/hierarchy/keyfile")
+
+	key, err := loadOrCreateTrustKey(tmpKeyFile)
+	require.NoError(t, err)
+	assert.NotNil(t, key)
+
+	_, err = os.Stat(tmpKeyFile)
+	require.NoError(t, err, "key file doesn't exist")
+}
+
+func TestLoadOrCreateTrustKeyCreateKeyNoPath(t *testing.T) {
+	defer os.Remove("keyfile")
+	key, err := loadOrCreateTrustKey("keyfile")
+	require.NoError(t, err)
+	assert.NotNil(t, key)
+
+	_, err = os.Stat("keyfile")
+	require.NoError(t, err, "key file doesn't exist")
+}
+
+func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) {
+	tmpKeyFile := filepath.Join("testdata", "keyfile")
+	key, err := loadOrCreateTrustKey(tmpKeyFile)
+	require.NoError(t, err)
+	expected := "AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY"
+	assert.Contains(t, key.String(), expected)
+}
diff --git a/daemon/unpause.go b/daemon/unpause.go
index e66b386..2e41f20 100644
--- a/daemon/unpause.go
+++ b/daemon/unpause.go
@@ -1,9 +1,11 @@
 package daemon
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/docker/docker/container"
+	"github.com/sirupsen/logrus"
 )
 
 // ContainerUnpause unpauses a container
@@ -30,9 +32,18 @@
 		return fmt.Errorf("Container %s is not paused", container.ID)
 	}
 
-	if err := daemon.containerd.Resume(container.ID); err != nil {
+	if err := daemon.containerd.Resume(context.Background(), container.ID); err != nil {
 		return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err)
 	}
 
+	container.Paused = false
+	daemon.setStateCounter(container)
+	daemon.updateHealthMonitor(container)
+	daemon.LogContainerEvent(container, "unpause")
+
+	if err := container.CheckpointTo(daemon.containersReplica); err != nil {
+		logrus.WithError(err).Warnf("could not save container to disk")
+	}
+
 	return nil
 }
diff --git a/daemon/update.go b/daemon/update.go
index 42d732f..0a79c19 100644
--- a/daemon/update.go
+++ b/daemon/update.go
@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"context"
 	"fmt"
 
 	"github.com/docker/docker/api/types/container"
@@ -16,7 +17,7 @@
 		return container.ContainerUpdateOKBody{Warnings: warnings}, err
 	}
 
-	warnings, err = daemon.verifyContainerSettings(c.Platform, hostConfig, nil, true)
+	warnings, err = daemon.verifyContainerSettings(c.OS, hostConfig, nil, true)
 	if err != nil {
 		return container.ContainerUpdateOKBody{Warnings: warnings}, validationError{err}
 	}
@@ -76,7 +77,7 @@
 	// If container is running (including paused), we need to update configs
 	// to the real world.
 	if container.IsRunning() && !container.IsRestarting() {
-		if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil {
+		if err := daemon.containerd.UpdateResources(context.Background(), container.ID, toContainerdResources(hostConfig.Resources)); err != nil {
 			restoreConfig = true
 			// TODO: it would be nice if containerd responded with better errors here so we can classify this better.
 			return errCannotUpdate(container.ID, systemError{err})
diff --git a/daemon/update_linux.go b/daemon/update_linux.go
index c128967..41d3b53 100644
--- a/daemon/update_linux.go
+++ b/daemon/update_linux.go
@@ -7,26 +7,43 @@
 
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/libcontainerd"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
-func toContainerdResources(resources container.Resources) libcontainerd.Resources {
+func toContainerdResources(resources container.Resources) *libcontainerd.Resources {
 	var r libcontainerd.Resources
-	r.BlkioWeight = uint64(resources.BlkioWeight)
-	r.CpuShares = uint64(resources.CPUShares)
+
+	r.BlockIO = &specs.LinuxBlockIO{
+		Weight: &resources.BlkioWeight,
+	}
+
+	shares := uint64(resources.CPUShares)
+	r.CPU = &specs.LinuxCPU{
+		Shares: &shares,
+		Cpus:   resources.CpusetCpus,
+		Mems:   resources.CpusetMems,
+	}
+
+	var (
+		period uint64
+		quota  int64
+	)
 	if resources.NanoCPUs != 0 {
-		r.CpuPeriod = uint64(100 * time.Millisecond / time.Microsecond)
-		r.CpuQuota = uint64(resources.NanoCPUs) * r.CpuPeriod / 1e9
-	} else {
-		r.CpuPeriod = uint64(resources.CPUPeriod)
-		r.CpuQuota = uint64(resources.CPUQuota)
+		period = uint64(100 * time.Millisecond / time.Microsecond)
+		quota = resources.NanoCPUs * int64(period) / 1e9
 	}
-	r.CpusetCpus = resources.CpusetCpus
-	r.CpusetMems = resources.CpusetMems
-	r.MemoryLimit = uint64(resources.Memory)
+	r.CPU.Period = &period
+	r.CPU.Quota = &quota
+
+	r.Memory = &specs.LinuxMemory{
+		Limit:       &resources.Memory,
+		Reservation: &resources.MemoryReservation,
+		Kernel:      &resources.KernelMemory,
+	}
+
 	if resources.MemorySwap > 0 {
-		r.MemorySwap = uint64(resources.MemorySwap)
+		r.Memory.Swap = &resources.MemorySwap
 	}
-	r.MemoryReservation = uint64(resources.MemoryReservation)
-	r.KernelMemoryLimit = uint64(resources.KernelMemory)
-	return r
+
+	return &r
 }
diff --git a/daemon/update_windows.go b/daemon/update_windows.go
index 0146626..4f85f41 100644
--- a/daemon/update_windows.go
+++ b/daemon/update_windows.go
@@ -7,7 +7,7 @@
 	"github.com/docker/docker/libcontainerd"
 )
 
-func toContainerdResources(resources container.Resources) libcontainerd.Resources {
-	var r libcontainerd.Resources
-	return r
+func toContainerdResources(resources container.Resources) *libcontainerd.Resources {
+	// We don't support update, so do nothing
+	return nil
 }
diff --git a/daemon/volumes.go b/daemon/volumes.go
index b2f17df..6a42ec2 100644
--- a/daemon/volumes.go
+++ b/daemon/volumes.go
@@ -75,6 +75,7 @@
 func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
 	binds := map[string]bool{}
 	mountPoints := map[string]*volume.MountPoint{}
+	parser := volume.NewParser(container.OS)
 	defer func() {
 		// clean up the container mountpoints once return with error
 		if retErr != nil {
@@ -103,7 +104,7 @@
 
 	// 2. Read volumes from other containers.
 	for _, v := range hostConfig.VolumesFrom {
-		containerID, mode, err := volume.ParseVolumesFrom(v)
+		containerID, mode, err := parser.ParseVolumesFrom(v)
 		if err != nil {
 			return err
 		}
@@ -118,7 +119,7 @@
 				Type:        m.Type,
 				Name:        m.Name,
 				Source:      m.Source,
-				RW:          m.RW && volume.ReadWrite(mode),
+				RW:          m.RW && parser.ReadWrite(mode),
 				Driver:      m.Driver,
 				Destination: m.Destination,
 				Propagation: m.Propagation,
@@ -140,7 +141,7 @@
 
 	// 3. Read bind mounts
 	for _, b := range hostConfig.Binds {
-		bind, err := volume.ParseMountRaw(b, hostConfig.VolumeDriver)
+		bind, err := parser.ParseMountRaw(b, hostConfig.VolumeDriver)
 		if err != nil {
 			return err
 		}
@@ -172,7 +173,7 @@
 	}
 
 	for _, cfg := range hostConfig.Mounts {
-		mp, err := volume.ParseMountSpec(cfg)
+		mp, err := parser.ParseMountSpec(cfg)
 		if err != nil {
 			return validationError{err}
 		}
@@ -206,6 +207,9 @@
 			}); ok {
 				mp.Source = cv.CachedPath()
 			}
+			if mp.Driver == volume.DefaultDriverName {
+				setBindModeIfNull(mp)
+			}
 		}
 
 		binds[mp.Destination] = true
@@ -217,7 +221,7 @@
 
 	// 4. Cleanup old volumes that are about to be reassigned.
 	for _, m := range mountPoints {
-		if m.BackwardsCompatible() {
+		if parser.IsBackwardCompatible(m) {
 			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
 				daemon.volumes.Dereference(mp.Volume, container.ID)
 			}
@@ -252,6 +256,8 @@
 	container.Lock()
 	defer container.Unlock()
 
+	parser := volume.NewParser(container.OS)
+
 	maybeUpdate := make(map[string]bool)
 	for _, mp := range container.MountPoints {
 		if mp.Spec.Source != "" && mp.Type != "" {
@@ -270,7 +276,7 @@
 
 	binds := make(map[string]*volume.MountPoint, len(container.HostConfig.Binds))
 	for _, rawSpec := range container.HostConfig.Binds {
-		mp, err := volume.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver)
+		mp, err := parser.ParseMountRaw(rawSpec, container.HostConfig.VolumeDriver)
 		if err != nil {
 			logrus.WithError(err).Error("Got unexpected error while re-parsing raw volume spec during spec backport")
 			continue
@@ -280,7 +286,7 @@
 
 	volumesFrom := make(map[string]volume.MountPoint)
 	for _, fromSpec := range container.HostConfig.VolumesFrom {
-		from, _, err := volume.ParseVolumesFrom(fromSpec)
+		from, _, err := parser.ParseVolumesFrom(fromSpec)
 		if err != nil {
 			logrus.WithError(err).WithField("id", container.ID).Error("Error reading volumes-from spec during mount spec backport")
 			continue
diff --git a/daemon/volumes_unit_test.go b/daemon/volumes_unit_test.go
index 450d17f..3f57f0c 100644
--- a/daemon/volumes_unit_test.go
+++ b/daemon/volumes_unit_test.go
@@ -1,6 +1,7 @@
 package daemon
 
 import (
+	"runtime"
 	"testing"
 
 	"github.com/docker/docker/volume"
@@ -20,8 +21,10 @@
 		{"foobar:baz", "", "", true},
 	}
 
+	parser := volume.NewParser(runtime.GOOS)
+
 	for _, c := range cases {
-		id, mode, err := volume.ParseVolumesFrom(c.spec)
+		id, mode, err := parser.ParseVolumesFrom(c.spec)
 		if c.fail {
 			if err == nil {
 				t.Fatalf("Expected error, was nil, for spec %s\n", c.spec)
diff --git a/daemon/volumes_unix.go b/daemon/volumes_unix.go
index 0a4cbf8..bee1fb1 100644
--- a/daemon/volumes_unix.go
+++ b/daemon/volumes_unix.go
@@ -1,7 +1,5 @@
 // +build !windows
 
-// TODO(amitkris): We need to split this file for solaris.
-
 package daemon
 
 import (
@@ -86,8 +84,13 @@
 	// remapped root (user namespaces)
 	rootIDs := daemon.idMappings.RootPair()
 	for _, mount := range netMounts {
-		if err := os.Chown(mount.Source, rootIDs.UID, rootIDs.GID); err != nil {
-			return nil, err
+		// we should only modify ownership of network files within our own container
+		// metadata repository. If the user specifies a mount path external, it is
+		// up to the user to make sure the file has proper ownership for userns
+		if strings.Index(mount.Source, daemon.repository) == 0 {
+			if err := os.Chown(mount.Source, rootIDs.UID, rootIDs.GID); err != nil {
+				return nil, err
+			}
 		}
 	}
 	return append(mounts, netMounts...), nil
diff --git a/distribution/config.go b/distribution/config.go
index 1c10533..16d729c 100644
--- a/distribution/config.go
+++ b/distribution/config.go
@@ -86,7 +86,7 @@
 type ImageConfigStore interface {
 	Put([]byte) (digest.Digest, error)
 	Get(digest.Digest) ([]byte, error)
-	RootFSAndPlatformFromConfig([]byte) (*image.RootFS, layer.Platform, error)
+	RootFSAndOSFromConfig([]byte) (*image.RootFS, layer.OS, error)
 }
 
 // PushLayerProvider provides layers to be pushed by ChainID.
@@ -112,7 +112,7 @@
 	// returns the final rootfs.
 	// Given progress output to track download progress
 	// Returns function to release download resources
-	Download(ctx context.Context, initialRootFS image.RootFS, platform layer.Platform, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error)
+	Download(ctx context.Context, initialRootFS image.RootFS, os layer.OS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error)
 }
 
 type imageConfigStore struct {
@@ -140,7 +140,7 @@
 	return img.RawJSON(), nil
 }
 
-func (s *imageConfigStore) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) {
+func (s *imageConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) {
 	var unmarshalledConfig image.Image
 	if err := json.Unmarshal(c, &unmarshalledConfig); err != nil {
 		return nil, "", err
@@ -154,11 +154,11 @@
 		return nil, "", fmt.Errorf("image operating system %q cannot be used on this platform", unmarshalledConfig.OS)
 	}
 
-	platform := ""
+	os := ""
 	if runtime.GOOS == "windows" {
-		platform = unmarshalledConfig.OS
+		os = unmarshalledConfig.OS
 	}
-	return unmarshalledConfig.RootFS, layer.Platform(platform), nil
+	return unmarshalledConfig.RootFS, layer.OS(os), nil
 }
 
 type storeLayerProvider struct {
diff --git a/distribution/pull.go b/distribution/pull.go
index 68144df..446197c 100644
--- a/distribution/pull.go
+++ b/distribution/pull.go
@@ -2,6 +2,7 @@
 
 import (
 	"fmt"
+	"runtime"
 
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api"
@@ -20,7 +21,7 @@
 	// Pull tries to pull the image referenced by `tag`
 	// Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint.
 	//
-	Pull(ctx context.Context, ref reference.Named) error
+	Pull(ctx context.Context, ref reference.Named, platform string) error
 }
 
 // newPuller returns a Puller interface that will pull from either a v1 or v2
@@ -113,7 +114,13 @@
 			lastErr = err
 			continue
 		}
-		if err := puller.Pull(ctx, ref); err != nil {
+
+		// Make sure we default the platform if it hasn't been supplied
+		if imagePullConfig.Platform == "" {
+			imagePullConfig.Platform = runtime.GOOS
+		}
+
+		if err := puller.Pull(ctx, ref, imagePullConfig.Platform); err != nil {
 			// Was this pull cancelled? If so, don't try to fall
 			// back.
 			fallback := false
diff --git a/distribution/pull_v1.go b/distribution/pull_v1.go
index 19a8792..6f1c2ee 100644
--- a/distribution/pull_v1.go
+++ b/distribution/pull_v1.go
@@ -12,6 +12,7 @@
 	"time"
 
 	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/client/auth"
 	"github.com/docker/distribution/registry/client/transport"
 	"github.com/docker/docker/distribution/metadata"
 	"github.com/docker/docker/distribution/xfer"
@@ -35,7 +36,7 @@
 	session     *registry.Session
 }
 
-func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error {
+func (p *v1Puller) Pull(ctx context.Context, ref reference.Named, platform string) error {
 	if _, isCanonical := ref.(reference.Canonical); isCanonical {
 		// Allowing fallback, because HTTPS v1 is before HTTP v2
 		return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}}
@@ -49,7 +50,7 @@
 	tr := transport.NewTransport(
 		// TODO(tiborvass): was ReceiveTimeout
 		registry.NewTransport(tlsConfig),
-		registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)...,
+		registry.Headers(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)...,
 	)
 	client := registry.HTTPClient(tr)
 	v1Endpoint := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)
@@ -68,7 +69,9 @@
 	return nil
 }
 
-func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error {
+// Note use auth.Scope rather than reference.Named due to this warning causing Jenkins CI to fail:
+// warning: ref can be github.com/docker/docker/vendor/github.com/docker/distribution/registry/client/auth.Scope (interfacer)
+func (p *v1Puller) pullRepository(ctx context.Context, ref auth.Scope) error {
 	progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.Name.Name())
 
 	tagged, isTagged := ref.(reference.NamedTagged)
diff --git a/distribution/pull_v2.go b/distribution/pull_v2.go
index 08a24e5..c8d784c 100644
--- a/distribution/pull_v2.go
+++ b/distribution/pull_v2.go
@@ -8,6 +8,7 @@
 	"net/url"
 	"os"
 	"runtime"
+	"strings"
 
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/manifest/manifestlist"
@@ -61,7 +62,7 @@
 	confirmedV2 bool
 }
 
-func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
+func (p *v2Puller) Pull(ctx context.Context, ref reference.Named, platform string) (err error) {
 	// TODO(tiborvass): was ReceiveTimeout
 	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
 	if err != nil {
@@ -69,7 +70,7 @@
 		return err
 	}
 
-	if err = p.pullV2Repository(ctx, ref); err != nil {
+	if err = p.pullV2Repository(ctx, ref, platform); err != nil {
 		if _, ok := err.(fallbackError); ok {
 			return err
 		}
@@ -84,10 +85,10 @@
 	return err
 }
 
-func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
+func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named, platform string) (err error) {
 	var layersDownloaded bool
 	if !reference.IsNameOnly(ref) {
-		layersDownloaded, err = p.pullV2Tag(ctx, ref)
+		layersDownloaded, err = p.pullV2Tag(ctx, ref, platform)
 		if err != nil {
 			return err
 		}
@@ -109,7 +110,7 @@
 			if err != nil {
 				return err
 			}
-			pulledNew, err := p.pullV2Tag(ctx, tagRef)
+			pulledNew, err := p.pullV2Tag(ctx, tagRef, platform)
 			if err != nil {
 				// Since this is the pull-all-tags case, don't
 				// allow an error pulling a particular tag to
@@ -325,7 +326,7 @@
 	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()})
 }
 
-func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
+func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named, os string) (tagUpdated bool, err error) {
 	manSvc, err := p.repo.Manifests(ctx)
 	if err != nil {
 		return false, err
@@ -389,17 +390,17 @@
 		if p.config.RequireSchema2 {
 			return false, fmt.Errorf("invalid manifest: not schema2")
 		}
-		id, manifestDigest, err = p.pullSchema1(ctx, ref, v)
+		id, manifestDigest, err = p.pullSchema1(ctx, ref, v, os)
 		if err != nil {
 			return false, err
 		}
 	case *schema2.DeserializedManifest:
-		id, manifestDigest, err = p.pullSchema2(ctx, ref, v)
+		id, manifestDigest, err = p.pullSchema2(ctx, ref, v, os)
 		if err != nil {
 			return false, err
 		}
 	case *manifestlist.DeserializedManifestList:
-		id, manifestDigest, err = p.pullManifestList(ctx, ref, v)
+		id, manifestDigest, err = p.pullManifestList(ctx, ref, v, os)
 		if err != nil {
 			return false, err
 		}
@@ -435,7 +436,7 @@
 	return true, nil
 }
 
-func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
+func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Reference, unverifiedManifest *schema1.SignedManifest, requestedOS string) (id digest.Digest, manifestDigest digest.Digest, err error) {
 	var verifiedManifest *schema1.Manifest
 	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)
 	if err != nil {
@@ -490,7 +491,8 @@
 	// The v1 manifest itself doesn't directly contain a platform. However,
 	// the history does, but unfortunately that's a string, so search through
 	// all the history until hopefully we find one which indicates the os.
-	platform := runtime.GOOS
+	// supertest2014/nyan is an example of a registry image with schemav1.
+	configOS := runtime.GOOS
 	if system.LCOWSupported() {
 		type config struct {
 			Os string `json:"os,omitempty"`
@@ -499,14 +501,20 @@
 			var c config
 			if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil {
 				if c.Os != "" {
-					platform = c.Os
+					configOS = c.Os
 					break
 				}
 			}
 		}
 	}
 
-	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, layer.Platform(platform), descriptors, p.config.ProgressOutput)
+	// Early bath if the requested OS doesn't match that of the configuration.
+	// This avoids doing the download, only to potentially fail later.
+	if !strings.EqualFold(configOS, requestedOS) {
+		return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
+	}
+
+	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, layer.OS(configOS), descriptors, p.config.ProgressOutput)
 	if err != nil {
 		return "", "", err
 	}
@@ -527,7 +535,7 @@
 	return imageID, manifestDigest, nil
 }
 
-func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
+func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest, requestedOS string) (id digest.Digest, manifestDigest digest.Digest, err error) {
 	manifestDigest, err = schema2ManifestDigest(ref, mfst)
 	if err != nil {
 		return "", "", err
@@ -576,11 +584,11 @@
 	}()
 
 	var (
-		configJSON       []byte         // raw serialized image config
-		downloadedRootFS *image.RootFS  // rootFS from registered layers
-		configRootFS     *image.RootFS  // rootFS from configuration
-		release          func()         // release resources from rootFS download
-		platform         layer.Platform // for LCOW when registering downloaded layers
+		configJSON       []byte        // raw serialized image config
+		downloadedRootFS *image.RootFS // rootFS from registered layers
+		configRootFS     *image.RootFS // rootFS from configuration
+		release          func()        // release resources from rootFS download
+		configOS         layer.OS      // for LCOW when registering downloaded layers
 	)
 
 	// https://github.com/docker/docker/issues/24766 - Err on the side of caution,
@@ -592,7 +600,7 @@
 	// check to block Windows images being pulled on Linux is implemented, it
 	// may be necessary to perform the same type of serialisation.
 	if runtime.GOOS == "windows" {
-		configJSON, configRootFS, platform, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
+		configJSON, configRootFS, configOS, err = receiveConfig(p.config.ImageStore, configChan, configErrChan)
 		if err != nil {
 			return "", "", err
 		}
@@ -605,6 +613,12 @@
 			return "", "", errRootFSMismatch
 		}
 
+		// Early bath if the requested OS doesn't match that of the configuration.
+		// This avoids doing the download, only to potentially fail later.
+		if !strings.EqualFold(string(configOS), requestedOS) {
+			return "", "", fmt.Errorf("cannot download image with operating system %q when requesting %q", configOS, requestedOS)
+		}
+
 		// Populate diff ids in descriptors to avoid downloading foreign layers
 		// which have been side loaded
 		for i := range descriptors {
@@ -619,7 +633,7 @@
 				rootFS image.RootFS
 			)
 			downloadRootFS := *image.NewRootFS()
-			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, platform, descriptors, p.config.ProgressOutput)
+			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, layer.OS(requestedOS), descriptors, p.config.ProgressOutput)
 			if err != nil {
 				// Intentionally do not cancel the config download here
 				// as the error from config download (if there is one)
@@ -684,14 +698,14 @@
 	return imageID, manifestDigest, nil
 }
 
-func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, layer.Platform, error) {
+func receiveConfig(s ImageConfigStore, configChan <-chan []byte, errChan <-chan error) ([]byte, *image.RootFS, layer.OS, error) {
 	select {
 	case configJSON := <-configChan:
-		rootfs, platform, err := s.RootFSAndPlatformFromConfig(configJSON)
+		rootfs, os, err := s.RootFSAndOSFromConfig(configJSON)
 		if err != nil {
 			return nil, nil, "", err
 		}
-		return configJSON, rootfs, platform, nil
+		return configJSON, rootfs, os, nil
 	case err := <-errChan:
 		return nil, nil, "", err
 		// Don't need a case for ctx.Done in the select because cancellation
@@ -701,31 +715,27 @@
 
 // pullManifestList handles "manifest lists" which point to various
 // platform-specific manifests.
-func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) {
+func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList, os string) (id digest.Digest, manifestListDigest digest.Digest, err error) {
 	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
 	if err != nil {
 		return "", "", err
 	}
 
-	logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a os/arch match", ref, len(mfstList.Manifests))
-	var manifestDigest digest.Digest
-	for _, manifestDescriptor := range mfstList.Manifests {
-		// TODO(aaronl): The manifest list spec supports optional
-		// "features" and "variant" fields. These are not yet used.
-		// Once they are, their values should be interpreted here.
-		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
-			manifestDigest = manifestDescriptor.Digest
-			logrus.Debugf("found match for %s/%s with media type %s, digest %s", runtime.GOOS, runtime.GOARCH, manifestDescriptor.MediaType, manifestDigest.String())
-			break
-		}
-	}
+	logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for a %s/%s match", ref, len(mfstList.Manifests), os, runtime.GOARCH)
 
-	if manifestDigest == "" {
-		errMsg := fmt.Sprintf("no matching manifest for %s/%s in the manifest list entries", runtime.GOOS, runtime.GOARCH)
+	manifestMatches := filterManifests(mfstList.Manifests, os)
+
+	if len(manifestMatches) == 0 {
+		errMsg := fmt.Sprintf("no matching manifest for %s/%s in the manifest list entries", os, runtime.GOARCH)
 		logrus.Debugf(errMsg)
 		return "", "", errors.New(errMsg)
 	}
 
+	if len(manifestMatches) > 1 {
+		logrus.Debugf("found multiple matches in manifest list, choosing best match %s", manifestMatches[0].Digest.String())
+	}
+	manifestDigest := manifestMatches[0].Digest
+
 	manSvc, err := p.repo.Manifests(ctx)
 	if err != nil {
 		return "", "", err
@@ -743,12 +753,12 @@
 
 	switch v := manifest.(type) {
 	case *schema1.SignedManifest:
-		id, _, err = p.pullSchema1(ctx, manifestRef, v)
+		id, _, err = p.pullSchema1(ctx, manifestRef, v, os)
 		if err != nil {
 			return "", "", err
 		}
 	case *schema2.DeserializedManifest:
-		id, _, err = p.pullSchema2(ctx, manifestRef, v)
+		id, _, err = p.pullSchema2(ctx, manifestRef, v, os)
 		if err != nil {
 			return "", "", err
 		}
diff --git a/distribution/pull_v2_unix.go b/distribution/pull_v2_unix.go
index 45a7a0c..666ccc8 100644
--- a/distribution/pull_v2_unix.go
+++ b/distribution/pull_v2_unix.go
@@ -3,11 +3,27 @@
 package distribution
 
 import (
+	"runtime"
+
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/manifest/manifestlist"
+	"github.com/sirupsen/logrus"
 )
 
 func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) {
 	blobs := ld.repo.Blobs(ctx)
 	return blobs.Open(ctx, ld.digest)
 }
+
+func filterManifests(manifests []manifestlist.ManifestDescriptor, os string) []manifestlist.ManifestDescriptor {
+	var matches []manifestlist.ManifestDescriptor
+	for _, manifestDescriptor := range manifests {
+		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == os {
+			matches = append(matches, manifestDescriptor)
+
+			logrus.Debugf("found match for %s/%s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.MediaType, manifestDescriptor.Digest.String())
+		}
+	}
+	return matches
+}
diff --git a/distribution/pull_v2_windows.go b/distribution/pull_v2_windows.go
index e10070d..b4573e1 100644
--- a/distribution/pull_v2_windows.go
+++ b/distribution/pull_v2_windows.go
@@ -3,13 +3,19 @@
 package distribution
 
 import (
+	"fmt"
 	"net/http"
 	"os"
+	"runtime"
+	"sort"
+	"strings"
 
 	"github.com/docker/distribution"
 	"github.com/docker/distribution/context"
+	"github.com/docker/distribution/manifest/manifestlist"
 	"github.com/docker/distribution/manifest/schema2"
 	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/docker/pkg/system"
 	"github.com/sirupsen/logrus"
 )
 
@@ -55,3 +61,51 @@
 	}
 	return rsc, err
 }
+
+func filterManifests(manifests []manifestlist.ManifestDescriptor, os string) []manifestlist.ManifestDescriptor {
+	osVersion := ""
+	if os == "windows" {
+		// TODO: Add UBR (Update Build Release) component after build
+		version := system.GetOSVersion()
+		osVersion = fmt.Sprintf("%d.%d.%d", version.MajorVersion, version.MinorVersion, version.Build)
+		logrus.Debugf("will prefer entries with version %s", osVersion)
+	}
+
+	var matches []manifestlist.ManifestDescriptor
+	for _, manifestDescriptor := range manifests {
+		// TODO: Consider filtering out greater versions, including only greater UBR
+		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == os {
+			matches = append(matches, manifestDescriptor)
+			logrus.Debugf("found match for %s/%s with media type %s, digest %s", os, runtime.GOARCH, manifestDescriptor.MediaType, manifestDescriptor.Digest.String())
+		}
+	}
+	if os == "windows" {
+		sort.Stable(manifestsByVersion{osVersion, matches})
+	}
+	return matches
+}
+
+func versionMatch(actual, expected string) bool {
+	// Check whether the version matches up to the build, ignoring UBR
+	return strings.HasPrefix(actual, expected+".")
+}
+
+type manifestsByVersion struct {
+	version string
+	list    []manifestlist.ManifestDescriptor
+}
+
+func (mbv manifestsByVersion) Less(i, j int) bool {
+	// TODO: Split version by parts and compare
+	// TODO: Prefer versions which have a greater version number
+	// Move compatible versions to the top, with no other ordering changes
+	return versionMatch(mbv.list[i].Platform.OSVersion, mbv.version) && !versionMatch(mbv.list[j].Platform.OSVersion, mbv.version)
+}
+
+func (mbv manifestsByVersion) Len() int {
+	return len(mbv.list)
+}
+
+func (mbv manifestsByVersion) Swap(i, j int) {
+	mbv.list[i], mbv.list[j] = mbv.list[j], mbv.list[i]
+}
diff --git a/distribution/push.go b/distribution/push.go
index 25b49fa..869a53e 100644
--- a/distribution/push.go
+++ b/distribution/push.go
@@ -69,7 +69,7 @@
 		return err
 	}
 
-	progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.Name.Name())
+	progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to repository [%s]", repoInfo.Name.Name())
 
 	associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo.Name)
 	if len(associations) == 0 {
diff --git a/distribution/push_v1.go b/distribution/push_v1.go
index 6cc4f15..dccc7b0 100644
--- a/distribution/push_v1.go
+++ b/distribution/push_v1.go
@@ -38,7 +38,7 @@
 	tr := transport.NewTransport(
 		// TODO(tiborvass): was NoTimeout
 		registry.NewTransport(tlsConfig),
-		registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)...,
+		registry.Headers(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)...,
 	)
 	client := registry.HTTPClient(tr)
 	v1Endpoint := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)
diff --git a/distribution/push_v2.go b/distribution/push_v2.go
index 5ceac8b..7ffce5b 100644
--- a/distribution/push_v2.go
+++ b/distribution/push_v2.go
@@ -118,7 +118,7 @@
 		return fmt.Errorf("could not find image from tag %s: %v", reference.FamiliarString(ref), err)
 	}
 
-	rootfs, _, err := p.config.ImageStore.RootFSAndPlatformFromConfig(imgConfig)
+	rootfs, _, err := p.config.ImageStore.RootFSAndOSFromConfig(imgConfig)
 	if err != nil {
 		return fmt.Errorf("unable to get rootfs for image %s: %s", reference.FamiliarString(ref), err)
 	}
@@ -395,12 +395,7 @@
 	defer layerUpload.Close()
 
 	// upload the blob
-	desc, err := pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload)
-	if err != nil {
-		return desc, err
-	}
-
-	return desc, nil
+	return pd.uploadUsingSession(ctx, progressOutput, diffID, layerUpload)
 }
 
 func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) {
diff --git a/distribution/registry.go b/distribution/registry.go
index bce270a..03d0524 100644
--- a/distribution/registry.go
+++ b/distribution/registry.go
@@ -82,7 +82,7 @@
 		base.Dial = proxyDialer.Dial
 	}
 
-	modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders)
+	modifiers := registry.Headers(dockerversion.DockerUserAgent(ctx), metaHeaders)
 	authTransport := transport.NewTransport(base, modifiers...)
 
 	challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport)
diff --git a/distribution/registry_unit_test.go b/distribution/registry_unit_test.go
index d6b6ee8..db18149 100644
--- a/distribution/registry_unit_test.go
+++ b/distribution/registry_unit_test.go
@@ -1,12 +1,9 @@
 package distribution
 
 import (
-	"fmt"
-	"io/ioutil"
 	"net/http"
 	"net/http/httptest"
 	"net/url"
-	"os"
 	"runtime"
 	"strings"
 	"testing"
@@ -14,8 +11,6 @@
 	"github.com/docker/distribution/reference"
 	"github.com/docker/docker/api/types"
 	registrytypes "github.com/docker/docker/api/types/registry"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/docker/registry"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/net/context"
@@ -42,12 +37,6 @@
 }
 
 func testTokenPassThru(t *testing.T, ts *httptest.Server) {
-	tmp, err := testDirectory("")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmp)
-
 	uri, err := url.Parse(ts.URL)
 	if err != nil {
 		t.Fatalf("could not parse url from test server: %v", err)
@@ -95,7 +84,7 @@
 	logrus.Debug("About to pull")
 	// We expect it to fail, since we haven't mock'd the full registry exchange in our handler above
 	tag, _ := reference.WithTag(n, "tag_goes_here")
-	_ = p.pullV2Repository(ctx, tag)
+	_ = p.pullV2Repository(ctx, tag, runtime.GOOS)
 }
 
 func TestTokenPassThru(t *testing.T) {
@@ -137,36 +126,3 @@
 		t.Fatal("Redirect should not forward Authorization header to another host")
 	}
 }
-
-// testDirectory creates a new temporary directory and returns its path.
-// The contents of directory at path `templateDir` is copied into the
-// new directory.
-func testDirectory(templateDir string) (dir string, err error) {
-	testID := stringid.GenerateNonCryptoID()[:4]
-	prefix := fmt.Sprintf("docker-test%s-%s-", testID, getCallerName(2))
-	if prefix == "" {
-		prefix = "docker-test-"
-	}
-	dir, err = ioutil.TempDir("", prefix)
-	if err = os.Remove(dir); err != nil {
-		return
-	}
-	if templateDir != "" {
-		if err = archive.NewDefaultArchiver().CopyWithTar(templateDir, dir); err != nil {
-			return
-		}
-	}
-	return
-}
-
-// getCallerName introspects the call stack and returns the name of the
-// function `depth` levels down in the stack.
-func getCallerName(depth int) string {
-	// Use the caller function name as a prefix.
-	// This helps trace temp directories back to their test.
-	pc, _, _, _ := runtime.Caller(depth + 1)
-	callerLongName := runtime.FuncForPC(pc).Name()
-	parts := strings.Split(callerLongName, ".")
-	callerShortName := parts[len(parts)-1]
-	return callerShortName
-}
diff --git a/distribution/xfer/download.go b/distribution/xfer/download.go
index 6ee0aa5..043119e 100644
--- a/distribution/xfer/download.go
+++ b/distribution/xfer/download.go
@@ -95,7 +95,7 @@
 // Download method is called to get the layer tar data. Layers are then
 // registered in the appropriate order.  The caller must call the returned
 // release function once it is done with the returned RootFS object.
-func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, platform layer.Platform, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
+func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os layer.OS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
 	var (
 		topLayer       layer.Layer
 		topDownload    *downloadTransfer
@@ -105,9 +105,9 @@
 		downloadsByKey = make(map[string]*downloadTransfer)
 	)
 
-	// Assume that the platform is the host OS if blank
-	if platform == "" {
-		platform = layer.Platform(runtime.GOOS)
+	// Assume that the operating system is the host OS if blank
+	if os == "" {
+		os = layer.OS(runtime.GOOS)
 	}
 
 	rootFS := initialRootFS
@@ -121,13 +121,13 @@
 			if err == nil {
 				getRootFS := rootFS
 				getRootFS.Append(diffID)
-				l, err := ldm.layerStores[string(platform)].Get(getRootFS.ChainID())
+				l, err := ldm.layerStores[string(os)].Get(getRootFS.ChainID())
 				if err == nil {
 					// Layer already exists.
 					logrus.Debugf("Layer already exists: %s", descriptor.ID())
 					progress.Update(progressOutput, descriptor.ID(), "Already exists")
 					if topLayer != nil {
-						layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer)
+						layer.ReleaseAndLog(ldm.layerStores[string(os)], topLayer)
 					}
 					topLayer = l
 					missingLayer = false
@@ -146,7 +146,7 @@
 		// the stack? If so, avoid downloading it more than once.
 		var topDownloadUncasted Transfer
 		if existingDownload, ok := downloadsByKey[key]; ok {
-			xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload, platform)
+			xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload, os)
 			defer topDownload.Transfer.Release(watcher)
 			topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
 			topDownload = topDownloadUncasted.(*downloadTransfer)
@@ -158,10 +158,10 @@
 
 		var xferFunc DoFunc
 		if topDownload != nil {
-			xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload, platform)
+			xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload, os)
 			defer topDownload.Transfer.Release(watcher)
 		} else {
-			xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil, platform)
+			xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil, os)
 		}
 		topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput)
 		topDownload = topDownloadUncasted.(*downloadTransfer)
@@ -171,7 +171,7 @@
 	if topDownload == nil {
 		return rootFS, func() {
 			if topLayer != nil {
-				layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer)
+				layer.ReleaseAndLog(ldm.layerStores[string(os)], topLayer)
 			}
 		}, nil
 	}
@@ -182,7 +182,7 @@
 
 	defer func() {
 		if topLayer != nil {
-			layer.ReleaseAndLog(ldm.layerStores[string(platform)], topLayer)
+			layer.ReleaseAndLog(ldm.layerStores[string(os)], topLayer)
 		}
 	}()
 
@@ -218,11 +218,11 @@
 // complete before the registration step, and registers the downloaded data
 // on top of parentDownload's resulting layer. Otherwise, it registers the
 // layer on top of the ChainID given by parentLayer.
-func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, platform layer.Platform) DoFunc {
+func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer, os layer.OS) DoFunc {
 	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
 		d := &downloadTransfer{
 			Transfer:   NewTransfer(),
-			layerStore: ldm.layerStores[string(platform)],
+			layerStore: ldm.layerStores[string(os)],
 		}
 
 		go func() {
@@ -341,9 +341,9 @@
 				src = fs.Descriptor()
 			}
 			if ds, ok := d.layerStore.(layer.DescribableStore); ok {
-				d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, platform, src)
+				d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, os, src)
 			} else {
-				d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer, platform)
+				d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer, os)
 			}
 			if err != nil {
 				select {
@@ -382,11 +382,11 @@
 // parentDownload. This function does not log progress output because it would
 // interfere with the progress reporting for sourceDownload, which has the same
 // Key.
-func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, platform layer.Platform) DoFunc {
+func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer, os layer.OS) DoFunc {
 	return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
 		d := &downloadTransfer{
 			Transfer:   NewTransfer(),
-			layerStore: ldm.layerStores[string(platform)],
+			layerStore: ldm.layerStores[string(os)],
 		}
 
 		go func() {
@@ -440,9 +440,9 @@
 				src = fs.Descriptor()
 			}
 			if ds, ok := d.layerStore.(layer.DescribableStore); ok {
-				d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, platform, src)
+				d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, os, src)
 			} else {
-				d.layer, err = d.layerStore.Register(layerReader, parentLayer, platform)
+				d.layer, err = d.layerStore.Register(layerReader, parentLayer, os)
 			}
 			if err != nil {
 				d.err = fmt.Errorf("failed to register layer: %v", err)
diff --git a/distribution/xfer/download_test.go b/distribution/xfer/download_test.go
index e5aba02..8af27f6 100644
--- a/distribution/xfer/download_test.go
+++ b/distribution/xfer/download_test.go
@@ -26,7 +26,7 @@
 	diffID    layer.DiffID
 	chainID   layer.ChainID
 	parent    layer.Layer
-	platform  layer.Platform
+	os        layer.OS
 }
 
 func (ml *mockLayer) TarStream() (io.ReadCloser, error) {
@@ -57,8 +57,8 @@
 	return 0, nil
 }
 
-func (ml *mockLayer) Platform() layer.Platform {
-	return ml.platform
+func (ml *mockLayer) OS() layer.OS {
+	return ml.os
 }
 
 func (ml *mockLayer) Metadata() (map[string]string, error) {
@@ -91,7 +91,7 @@
 	return layers
 }
 
-func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID, platform layer.Platform) (layer.Layer, error) {
+func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID, os layer.OS) (layer.Layer, error) {
 	return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{})
 }
 
@@ -293,13 +293,13 @@
 	firstDescriptor := descriptors[0].(*mockDownloadDescriptor)
 
 	// Pre-register the first layer to simulate an already-existing layer
-	l, err := layerStore.Register(firstDescriptor.mockTarStream(), "", layer.Platform(runtime.GOOS))
+	l, err := layerStore.Register(firstDescriptor.mockTarStream(), "", layer.OS(runtime.GOOS))
 	if err != nil {
 		t.Fatal(err)
 	}
 	firstDescriptor.diffID = l.DiffID()
 
-	rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), layer.Platform(runtime.GOOS), descriptors, progress.ChanOutput(progressChan))
+	rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), layer.OS(runtime.GOOS), descriptors, progress.ChanOutput(progressChan))
 	if err != nil {
 		t.Fatalf("download error: %v", err)
 	}
@@ -357,7 +357,7 @@
 	}()
 
 	descriptors := downloadDescriptors(nil)
-	_, _, err := ldm.Download(ctx, *image.NewRootFS(), layer.Platform(runtime.GOOS), descriptors, progress.ChanOutput(progressChan))
+	_, _, err := ldm.Download(ctx, *image.NewRootFS(), layer.OS(runtime.GOOS), descriptors, progress.ChanOutput(progressChan))
 	if err != context.Canceled {
 		t.Fatal("expected download to be cancelled")
 	}
diff --git a/docs/api/v1.18.md b/docs/api/v1.18.md
index 973dca9..3277014 100644
--- a/docs/api/v1.18.md
+++ b/docs/api/v1.18.md
@@ -7,8 +7,8 @@
 - /reference/api/docker_remote_api_v1.18/
 ---
 
-<!-- This file is maintained within the docker/docker Github
-     repository at https://github.com/docker/docker/. Make all
+<!-- This file is maintained within the moby/moby GitHub
+     repository at https://github.com/moby/moby/. Make all
      pull requests against that repo. If you see this file in
      another repository, consider it read-only there, as it will
      periodically be overwritten by the definitive file. Pull
@@ -23,6 +23,13 @@
  - The API tends to be REST, but for some complex commands, like `attach`
    or `pull`, the HTTP connection is hijacked to transport `stdout`,
    `stdin` and `stderr`.
+ - A `Content-Length` header should be present in `POST` requests to endpoints
+   that expect a body.
+ - To lock to a specific version of the API, you prefix the URL with the version
+   of the API to use. For example, `/v1.18/info`. If no version is included in
+   the URL, the maximum supported API version is used.
+ - If the API version specified in the URL is not supported by the daemon, an HTTP
+   `400 Bad Request` error message is returned.
 
 ## 2. Endpoints
 
@@ -131,6 +138,7 @@
 
     POST /v1.18/containers/create HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
            "Hostname": "",
@@ -256,8 +264,14 @@
           should map to. A JSON object in the form
           `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
           Take note that `port` is specified as a string and not an integer value.
-    -   **PublishAllPorts** - Allocates a random host port for all of a container's
+    -   **PublishAllPorts** - Allocates an ephemeral host port for all of a container's
           exposed ports. Specified as a boolean value.
+
+          Ports are de-allocated when the container stops and allocated when the container starts.
+          The allocated port might be changed when restarting the container.
+
+          The port is selected from the ephemeral port range that depends on the kernel.
+          For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`.
     -   **Privileged** - Gives the container full access to the host. Specified as
           a boolean value.
     -   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
@@ -1077,6 +1091,7 @@
 
     POST /v1.18/containers/4fa6e0f0c678/copy HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Resource": "test.txt"
@@ -1556,6 +1571,7 @@
 
     POST /v1.18/auth HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "username": "hannibal",
@@ -1701,6 +1717,7 @@
 
     POST /v1.18/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Hostname": "",
@@ -1865,6 +1882,7 @@
 
     POST /v1.18/images/load
     Content-Type: application/x-tar
+    Content-Length: 12345
 
     Tarball in body
 
@@ -1908,6 +1926,7 @@
 
     POST /v1.18/containers/e90e34656806/exec HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "AttachStdin": true,
@@ -1953,6 +1972,7 @@
 
     POST /v1.18/exec/e90e34656806/start HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
      "Detach": false,
diff --git a/docs/api/v1.19.md b/docs/api/v1.19.md
index 3bcf766..448fe83 100644
--- a/docs/api/v1.19.md
+++ b/docs/api/v1.19.md
@@ -7,8 +7,8 @@
 - /reference/api/docker_remote_api_v1.19/
 ---
 
-<!-- This file is maintained within the docker/docker Github
-     repository at https://github.com/docker/docker/. Make all
+<!-- This file is maintained within the moby/moby GitHub
+     repository at https://github.com/moby/moby/. Make all
      pull requests against that repo. If you see this file in
      another repository, consider it read-only there, as it will
      periodically be overwritten by the definitive file. Pull
@@ -23,8 +23,13 @@
  - The API tends to be REST. However, for some complex commands, like `attach`
    or `pull`, the HTTP connection is hijacked to transport `stdout`,
    `stdin` and `stderr`.
- - When the client API version is newer than the daemon's, these calls return an HTTP
-   `400 Bad Request` error message.
+ - A `Content-Length` header should be present in `POST` requests to endpoints
+   that expect a body.
+ - To lock to a specific version of the API, you prefix the URL with the version
+   of the API to use. For example, `/v1.18/info`. If no version is included in
+   the URL, the maximum supported API version is used.
+ - If the API version specified in the URL is not supported by the daemon, an HTTP
+   `400 Bad Request` error message is returned.
 
 ## 2. Endpoints
 
@@ -133,6 +138,7 @@
 
     POST /v1.19/containers/create HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
            "Hostname": "",
@@ -268,8 +274,14 @@
           should map to. A JSON object in the form
           `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
           Take note that `port` is specified as a string and not an integer value.
-    -   **PublishAllPorts** - Allocates a random host port for all of a container's
+    -   **PublishAllPorts** - Allocates an ephemeral host port for all of a container's
           exposed ports. Specified as a boolean value.
+
+          Ports are de-allocated when the container stops and allocated when the container starts.
+          The allocated port might be changed when restarting the container.
+
+          The port is selected from the ephemeral port range that depends on the kernel.
+          For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`.
     -   **Privileged** - Gives the container full access to the host. Specified as
           a boolean value.
     -   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
@@ -1116,6 +1128,7 @@
 
     POST /v1.19/containers/4fa6e0f0c678/copy HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Resource": "test.txt"
@@ -1628,6 +1641,7 @@
 
     POST /v1.19/auth HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "username": "hannibal",
@@ -1777,6 +1791,7 @@
 
     POST /v1.19/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Hostname": "",
@@ -1945,6 +1960,7 @@
 
     POST /v1.19/images/load
     Content-Type: application/x-tar
+    Content-Length: 12345
 
     Tarball in body
 
@@ -1988,6 +2004,7 @@
 
     POST /v1.19/containers/e90e34656806/exec HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "AttachStdin": true,
@@ -2036,6 +2053,7 @@
 
     POST /v1.19/exec/e90e34656806/start HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
      "Detach": false,
diff --git a/docs/api/v1.20.md b/docs/api/v1.20.md
index e5b8f32..b971bab 100644
--- a/docs/api/v1.20.md
+++ b/docs/api/v1.20.md
@@ -7,8 +7,8 @@
 - /reference/api/docker_remote_api_v1.20/
 ---
 
-<!-- This file is maintained within the docker/docker Github
-     repository at https://github.com/docker/docker/. Make all
+<!-- This file is maintained within the moby/moby GitHub
+     repository at https://github.com/moby/moby/. Make all
      pull requests against that repo. If you see this file in
      another repository, consider it read-only there, as it will
      periodically be overwritten by the definitive file. Pull
@@ -23,6 +23,13 @@
  - The API tends to be REST. However, for some complex commands, like `attach`
    or `pull`, the HTTP connection is hijacked to transport `stdout`,
    `stdin` and `stderr`.
+ - A `Content-Length` header should be present in `POST` requests to endpoints
+   that expect a body.
+ - To lock to a specific version of the API, you prefix the URL with the version
+   of the API to use. For example, `/v1.18/info`. If no version is included in
+   the URL, the maximum supported API version is used.
+ - If the API version specified in the URL is not supported by the daemon, an HTTP
+   `400 Bad Request` error message is returned.
 
 ## 2. Endpoints
 
@@ -131,6 +138,7 @@
 
     POST /v1.20/containers/create HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
            "Hostname": "",
@@ -269,8 +277,14 @@
           should map to. A JSON object in the form
           `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
           Take note that `port` is specified as a string and not an integer value.
-    -   **PublishAllPorts** - Allocates a random host port for all of a container's
+    -   **PublishAllPorts** - Allocates an ephemeral host port for all of a container's
           exposed ports. Specified as a boolean value.
+
+          Ports are de-allocated when the container stops and allocated when the container starts.
+          The allocated port might be changed when restarting the container.
+
+          The port is selected from the ephemeral port range that depends on the kernel.
+          For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`.
     -   **Privileged** - Gives the container full access to the host. Specified as
           a boolean value.
     -   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
@@ -1125,6 +1139,7 @@
 
     POST /v1.20/containers/4fa6e0f0c678/copy HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Resource": "test.txt"
@@ -1776,6 +1791,7 @@
 
     POST /v1.20/auth HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "username": "hannibal",
@@ -1926,6 +1942,7 @@
 
     POST /v1.20/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Hostname": "",
@@ -2100,6 +2117,7 @@
 
     POST /v1.20/images/load
     Content-Type: application/x-tar
+    Content-Length: 12345
 
     Tarball in body
 
@@ -2143,6 +2161,7 @@
 
     POST /v1.20/containers/e90e34656806/exec HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "AttachStdin": true,
@@ -2191,6 +2210,7 @@
 
     POST /v1.20/exec/e90e34656806/start HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
      "Detach": false,
diff --git a/docs/api/v1.21.md b/docs/api/v1.21.md
index f1863b8..2971d25 100644
--- a/docs/api/v1.21.md
+++ b/docs/api/v1.21.md
@@ -7,8 +7,8 @@
 - /reference/api/docker_remote_api_v1.21/
 ---
 
-<!-- This file is maintained within the docker/docker Github
-     repository at https://github.com/docker/docker/. Make all
+<!-- This file is maintained within the moby/moby GitHub
+     repository at https://github.com/moby/moby/. Make all
      pull requests against that repo. If you see this file in
      another repository, consider it read-only there, as it will
      periodically be overwritten by the definitive file. Pull
@@ -23,8 +23,13 @@
  - The API tends to be REST. However, for some complex commands, like `attach`
    or `pull`, the HTTP connection is hijacked to transport `stdout`,
    `stdin` and `stderr`.
- - When the client API version is newer than the daemon's, these calls return an HTTP
-   `400 Bad Request` error message.
+ - A `Content-Length` header should be present in `POST` requests to endpoints
+   that expect a body.
+ - To lock to a specific version of the API, you prefix the URL with the version
+   of the API to use. For example, `/v1.18/info`. If no version is included in
+   the URL, the maximum supported API version is used.
+ - If the API version specified in the URL is not supported by the daemon, an HTTP
+   `400 Bad Request` error message is returned.
 
 ## 2. Endpoints
 
@@ -137,6 +142,7 @@
 
     POST /v1.21/containers/create HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
            "Hostname": "",
@@ -288,8 +294,14 @@
           should map to. A JSON object in the form
           `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
           Take note that `port` is specified as a string and not an integer value.
-    -   **PublishAllPorts** - Allocates a random host port for all of a container's
+    -   **PublishAllPorts** - Allocates an ephemeral host port for all of a container's
           exposed ports. Specified as a boolean value.
+
+          Ports are de-allocated when the container stops and allocated when the container starts.
+          The allocated port might be changed when restarting the container.
+
+          The port is selected from the ephemeral port range that depends on the kernel.
+          For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`.
     -   **Privileged** - Gives the container full access to the host. Specified as
           a boolean value.
     -   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
@@ -1208,6 +1220,7 @@
 
     POST /v1.21/containers/4fa6e0f0c678/copy HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Resource": "test.txt"
@@ -1929,6 +1942,7 @@
 
     POST /v1.21/auth HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "username": "hannibal",
@@ -2081,6 +2095,7 @@
 
     POST /v1.21/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Hostname": "",
@@ -2256,6 +2271,7 @@
 
     POST /v1.21/images/load
     Content-Type: application/x-tar
+    Content-Length: 12345
 
     Tarball in body
 
@@ -2299,6 +2315,7 @@
 
     POST /v1.21/containers/e90e34656806/exec HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "AttachStdin": true,
@@ -2351,6 +2368,7 @@
 
     POST /v1.21/exec/e90e34656806/start HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
      "Detach": false,
@@ -2578,6 +2596,7 @@
 
     POST /v1.21/volumes/create HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "Name": "tardis"
@@ -2802,6 +2821,7 @@
 ```
 POST /v1.21/networks/create HTTP/1.1
 Content-Type: application/json
+Content-Length: 12345
 
 {
   "Name":"isolated_nw",
@@ -2865,6 +2885,7 @@
 ```
 POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
 Content-Type: application/json
+Content-Length: 12345
 
 {
   "Container":"3613f73ba0e4"
@@ -2896,6 +2917,7 @@
 ```
 POST /v1.21/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
 Content-Type: application/json
+Content-Length: 12345
 
 {
   "Container":"3613f73ba0e4"
diff --git a/docs/api/v1.22.md b/docs/api/v1.22.md
index 473ffd3..a51fbcb 100644
--- a/docs/api/v1.22.md
+++ b/docs/api/v1.22.md
@@ -7,8 +7,8 @@
 - /reference/api/docker_remote_api_v1.22/
 ---
 
-<!-- This file is maintained within the docker/docker Github
-     repository at https://github.com/docker/docker/. Make all
+<!-- This file is maintained within the moby/moby GitHub
+     repository at https://github.com/moby/moby/. Make all
      pull requests against that repo. If you see this file in
      another repository, consider it read-only there, as it will
      periodically be overwritten by the definitive file. Pull
@@ -23,6 +23,13 @@
  - The API tends to be REST. However, for some complex commands, like `attach`
    or `pull`, the HTTP connection is hijacked to transport `stdout`,
    `stdin` and `stderr`.
+ - A `Content-Length` header should be present in `POST` requests to endpoints
+   that expect a body.
+ - To lock to a specific version of the API, you prefix the URL with the version
+   of the API to use. For example, `/v1.18/info`. If no version is included in
+   the URL, the maximum supported API version is used.
+ - If the API version specified in the URL is not supported by the daemon, an HTTP
+   `400 Bad Request` error message is returned.
 
 ## 2. Endpoints
 
@@ -220,6 +227,7 @@
 
     POST /v1.22/containers/create HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
            "Hostname": "",
@@ -400,8 +408,14 @@
           should map to. A JSON object in the form
           `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
           Take note that `port` is specified as a string and not an integer value.
-    -   **PublishAllPorts** - Allocates a random host port for all of a container's
+    -   **PublishAllPorts** - Allocates an ephemeral host port for all of a container's
           exposed ports. Specified as a boolean value.
+
+          Ports are de-allocated when the container stops and allocated when the container starts.
+          The allocated port might be changed when restarting the container.
+
+          The port is selected from the ephemeral port range that depends on the kernel.
+          For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`.
     -   **Privileged** - Gives the container full access to the host. Specified as
           a boolean value.
     -   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
@@ -1387,6 +1401,7 @@
 
     POST /v1.22/containers/4fa6e0f0c678/copy HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Resource": "test.txt"
@@ -2146,6 +2161,7 @@
 
     POST /v1.22/auth HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "username": "hannibal",
@@ -2315,6 +2331,7 @@
 
     POST /v1.22/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Hostname": "",
@@ -2647,6 +2664,7 @@
 
     POST /v1.22/images/load
     Content-Type: application/x-tar
+    Content-Length: 12345
 
     Tarball in body
 
@@ -2690,6 +2708,7 @@
 
     POST /v1.22/containers/e90e34656806/exec HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "AttachStdin": true,
@@ -2746,6 +2765,7 @@
 
     POST /v1.22/exec/e90e34656806/start HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
      "Detach": false,
@@ -2891,6 +2911,7 @@
 
     POST /v1.22/volumes/create HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "Name": "tardis"
@@ -3123,6 +3144,7 @@
 ```
 POST /v1.22/networks/create HTTP/1.1
 Content-Type: application/json
+Content-Length: 12345
 
 {
   "Name":"isolated_nw",
@@ -3195,6 +3217,7 @@
 ```
 POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
 Content-Type: application/json
+Content-Length: 12345
 
 {
   "Container":"3613f73ba0e4",
@@ -3232,6 +3255,7 @@
 ```
 POST /v1.22/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
 Content-Type: application/json
+Content-Length: 12345
 
 {
   "Container":"3613f73ba0e4",
diff --git a/docs/api/v1.23.md b/docs/api/v1.23.md
index c040d24..677dcab 100644
--- a/docs/api/v1.23.md
+++ b/docs/api/v1.23.md
@@ -7,8 +7,8 @@
 - /reference/api/docker_remote_api_v1.23/
 ---
 
-<!-- This file is maintained within the docker/docker Github
-     repository at https://github.com/docker/docker/. Make all
+<!-- This file is maintained within the moby/moby GitHub
+     repository at https://github.com/moby/moby/. Make all
      pull requests against that repo. If you see this file in
      another repository, consider it read-only there, as it will
      periodically be overwritten by the definitive file. Pull
@@ -23,8 +23,13 @@
  - The API tends to be REST. However, for some complex commands, like `attach`
    or `pull`, the HTTP connection is hijacked to transport `stdout`,
    `stdin` and `stderr`.
- - When the client API version is newer than the daemon's, these calls return an HTTP
-   `400 Bad Request` error message.
+ - A `Content-Length` header should be present in `POST` requests to endpoints
+   that expect a body.
+ - To lock to a specific version of the API, you prefix the URL with the version
+   of the API to use. For example, `/v1.18/info`. If no version is included in
+   the URL, the maximum supported API version is used.
+ - If the API version specified in the URL is not supported by the daemon, an HTTP
+   `400 Bad Request` error message is returned.
 
 ## 2. Endpoints
 
@@ -244,6 +249,7 @@
 
     POST /v1.23/containers/create HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
            "Hostname": "",
@@ -426,8 +432,14 @@
           should map to. A JSON object in the form
           `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
           Take note that `port` is specified as a string and not an integer value.
-    -   **PublishAllPorts** - Allocates a random host port for all of a container's
+    -   **PublishAllPorts** - Allocates an ephemeral host port for all of a container's
           exposed ports. Specified as a boolean value.
+
+          Ports are de-allocated when the container stops and allocated when the container starts.
+          The allocated port might be changed when restarting the container.
+
+          The port is selected from the ephemeral port range that depends on the kernel.
+          For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`.
     -   **Privileged** - Gives the container full access to the host. Specified as
           a boolean value.
     -   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
@@ -1128,6 +1140,7 @@
 
        POST /v1.23/containers/e90e34656806/update HTTP/1.1
        Content-Type: application/json
+       Content-Length: 12345
 
        {
          "BlkioWeight": 300,
@@ -1422,6 +1435,7 @@
 
     POST /v1.23/containers/4fa6e0f0c678/copy HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Resource": "test.txt"
@@ -2190,6 +2204,7 @@
 
     POST /v1.23/auth HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "username": "hannibal",
@@ -2365,6 +2380,7 @@
 
     POST /v1.23/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Hostname": "",
@@ -2697,6 +2713,7 @@
 
     POST /v1.23/images/load
     Content-Type: application/x-tar
+    Content-Length: 12345
 
     Tarball in body
 
@@ -2766,6 +2783,7 @@
 
     POST /v1.23/containers/e90e34656806/exec HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "AttachStdin": true,
@@ -2822,6 +2840,7 @@
 
     POST /v1.23/exec/e90e34656806/start HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
      "Detach": false,
@@ -2967,6 +2986,7 @@
 
     POST /v1.23/volumes/create HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "Name": "tardis",
@@ -3224,6 +3244,7 @@
 ```
 POST /v1.23/networks/create HTTP/1.1
 Content-Type: application/json
+Content-Length: 12345
 
 {
   "Name":"isolated_nw",
@@ -3312,6 +3333,7 @@
 ```
 POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
 Content-Type: application/json
+Content-Length: 12345
 
 {
   "Container":"3613f73ba0e4",
@@ -3349,6 +3371,7 @@
 ```
 POST /v1.23/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
 Content-Type: application/json
+Content-Length: 12345
 
 {
   "Container":"3613f73ba0e4",
diff --git a/docs/api/v1.24.md b/docs/api/v1.24.md
index 29fed46..a32325e 100644
--- a/docs/api/v1.24.md
+++ b/docs/api/v1.24.md
@@ -7,8 +7,8 @@
 - /reference/api/docker_remote_api_v1.24/
 ---
 
-<!-- This file is maintained within the docker/docker Github
-     repository at https://github.com/docker/docker/. Make all
+<!-- This file is maintained within the moby/moby GitHub
+     repository at https://github.com/moby/moby/. Make all
      pull requests against that repo. If you see this file in
      another repository, consider it read-only there, as it will
      periodically be overwritten by the definitive file. Pull
@@ -23,6 +23,13 @@
  - The API tends to be REST. However, for some complex commands, like `attach`
    or `pull`, the HTTP connection is hijacked to transport `stdout`,
    `stdin` and `stderr`.
+ - A `Content-Length` header should be present in `POST` requests to endpoints
+   that expect a body.
+ - To lock to a specific version of the API, you prefix the URL with the version
+   of the API to use. For example, `/v1.18/info`. If no version is included in
+   the URL, the maximum supported API version is used.
+ - If the API version specified in the URL is not supported by the daemon, an HTTP
+   `400 Bad Request` error message is returned.
 
 ## 2. Errors
 
@@ -253,6 +260,7 @@
 
     POST /v1.24/containers/create HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
            "Hostname": "",
@@ -461,8 +469,14 @@
           should map to. A JSON object in the form
           `{ <port>/<protocol>: [{ "HostPort": "<port>" }] }`
           Take note that `port` is specified as a string and not an integer value.
-    -   **PublishAllPorts** - Allocates a random host port for all of a container's
+    -   **PublishAllPorts** - Allocates an ephemeral host port for all of a container's
           exposed ports. Specified as a boolean value.
+
+          Ports are de-allocated when the container stops and allocated when the container starts.
+          The allocated port might be changed when restarting the container.
+
+          The port is selected from the ephemeral port range that depends on the kernel.
+          For example, on Linux the range is defined by `/proc/sys/net/ipv4/ip_local_port_range`.
     -   **Privileged** - Gives the container full access to the host. Specified as
           a boolean value.
     -   **ReadonlyRootfs** - Mount the container's root filesystem as read only.
@@ -1172,6 +1186,7 @@
 
        POST /v1.24/containers/e90e34656806/update HTTP/1.1
        Content-Type: application/json
+       Content-Length: 12345
 
        {
          "BlkioWeight": 300,
@@ -2208,6 +2223,7 @@
 
     POST /v1.24/auth HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "username": "hannibal",
@@ -2387,6 +2403,7 @@
 
     POST /v1.24/commit?container=44c004db4b17&comment=message&repo=myrepo HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
          "Hostname": "",
@@ -2725,6 +2742,7 @@
 
     POST /v1.24/images/load
     Content-Type: application/x-tar
+    Content-Length: 12345
 
     Tarball in body
 
@@ -2794,6 +2812,7 @@
 
     POST /v1.24/containers/e90e34656806/exec HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "AttachStdin": true,
@@ -2850,6 +2869,7 @@
 
     POST /v1.24/exec/e90e34656806/start HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
      "Detach": false,
@@ -3000,6 +3020,7 @@
 
     POST /v1.24/volumes/create HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "Name": "tardis",
@@ -3290,6 +3311,7 @@
 ```
 POST /v1.24/networks/create HTTP/1.1
 Content-Type: application/json
+Content-Length: 12345
 
 {
   "Name":"isolated_nw",
@@ -3379,6 +3401,7 @@
 ```
 POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/connect HTTP/1.1
 Content-Type: application/json
+Content-Length: 12345
 
 {
   "Container":"3613f73ba0e4",
@@ -3417,6 +3440,7 @@
 ```
 POST /v1.24/networks/22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30/disconnect HTTP/1.1
 Content-Type: application/json
+Content-Length: 12345
 
 {
   "Container":"3613f73ba0e4",
@@ -4130,6 +4154,7 @@
 
     POST /v1.24/nodes/24ifsmvkjbyhk/update?version=8 HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "Availability": "active",
@@ -4231,6 +4256,7 @@
 
     POST /v1.24/swarm/init HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "ListenAddr": "0.0.0.0:2377",
@@ -4377,6 +4403,7 @@
 **Example request**:
 
     POST /v1.24/swarm/update HTTP/1.1
+    Content-Length: 12345
 
     {
       "Name": "default",
@@ -4576,6 +4603,7 @@
 
     POST /v1.24/services/create HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "Name": "web",
@@ -4876,6 +4904,7 @@
 
     POST /v1.24/services/1cb4dnqcyx6m66g2t538x3rxha/update?version=23 HTTP/1.1
     Content-Type: application/json
+    Content-Length: 12345
 
     {
       "Name": "top",
diff --git a/docs/api/version-history.md b/docs/api/version-history.md
index 6921e1e..5056c0d 100644
--- a/docs/api/version-history.md
+++ b/docs/api/version-history.md
@@ -4,8 +4,8 @@
 keywords: "API, Docker, rcli, REST, documentation"
 ---
 
-<!-- This file is maintained within the docker/docker Github
-     repository at https://github.com/docker/docker/. Make all
+<!-- This file is maintained within the moby/moby GitHub
+     repository at https://github.com/moby/moby/. Make all
      pull requests against that repo. If you see this file in
      another repository, consider it read-only there, as it will
      periodically be overwritten by the definitive file. Pull
@@ -13,6 +13,35 @@
      will be rejected.
 -->
 
+## v1.35 API changes
+
+[Docker Engine API v1.35](https://docs.docker.com/engine/api/v1.35/) documentation
+
+* `POST /services/create` and `POST /services/(id)/update` now accepts an
+  `Isolation` field on container spec to set the Isolation technology of the
+  containers running the service (`default`, `process`, or `hyperv`). This
+  configuration is only used for Windows containers.
+* `GET /containers/(name)/logs` now supports an additional query parameter: `until`,
+  which returns log lines that occurred before the specified timestamp.
+
+## v1.34 API changes
+
+[Docker Engine API v1.34](https://docs.docker.com/engine/api/v1.34/) documentation
+
+* `POST /containers/(name)/wait?condition=removed` now also returns
+  in case of container removal failure. A pointer to a structure named
+  `Error` was added to the response JSON in order to indicate a failure.
+  If `Error` is `null`, container removal has succeeded; otherwise,
+  the text of an error message indicating why container removal has failed
+  is available from the `Error.Message` field.
+
+## v1.33 API changes
+
+[Docker Engine API v1.33](https://docs.docker.com/engine/api/v1.33/) documentation
+
+* `GET /events` now supports filtering 4 more kinds of events: `config`, `node`,
+`secret` and `service`.
+
 ## v1.32 API changes
 
 [Docker Engine API v1.32](https://docs.docker.com/engine/api/v1.32/) documentation
@@ -20,6 +49,11 @@
 * `POST /containers/create` now accepts additional values for the
   `HostConfig.IpcMode` property. New values are `private`, `shareable`,
   and `none`.
+* `DELETE /networks/{id or name}` fixed issue where a `name` equal to another
+  network's name was able to mask that `id`. If both a network with the given
+  _name_ and a network with the given _id_ exist, the network with the given
+  _id_ is now deleted. This change is not versioned, and affects all API versions
+  if the daemon has this patch.
 
 ## v1.31 API changes
 
@@ -70,7 +104,7 @@
 * `POST /containers/(name)/wait` now accepts a `condition` query parameter to indicate which state change condition to wait for. Also, response headers are now returned immediately to acknowledge that the server has registered a wait callback for the client.
 * `POST /swarm/init` now accepts a `DataPathAddr` property to set the IP-address or network interface to use for data traffic
 * `POST /swarm/join` now accepts a `DataPathAddr` property to set the IP-address or network interface to use for data traffic
-* `GET /events` now supports service, node and secret events which are emitted when users create, update and remove service, node and secret 
+* `GET /events` now supports service, node and secret events which are emitted when users create, update and remove service, node and secret
 * `GET /events` now supports network remove event which is emitted when users remove a swarm scoped network
 * `GET /events` now supports a filter type `scope` in which supported value could be swarm and local
 
diff --git a/docs/contributing/README.md b/docs/contributing/README.md
new file mode 100644
index 0000000..915c0cf
--- /dev/null
+++ b/docs/contributing/README.md
@@ -0,0 +1,8 @@
+### Get set up for Moby development
+
+ * [README first](who-written-for.md)
+ * [Get the required software](software-required.md)
+ * [Set up for development on Windows](software-req-win.md)
+ * [Configure Git for contributing](set-up-git.md)
+ * [Work with a development container](set-up-dev-env.md)
+ * [Run tests and test documentation](test.md)
diff --git a/docs/contributing/images/branch-sig.png b/docs/contributing/images/branch-sig.png
new file mode 100644
index 0000000..b069319
--- /dev/null
+++ b/docs/contributing/images/branch-sig.png
Binary files differ
diff --git a/docs/contributing/images/contributor-edit.png b/docs/contributing/images/contributor-edit.png
new file mode 100644
index 0000000..d847e22
--- /dev/null
+++ b/docs/contributing/images/contributor-edit.png
Binary files differ
diff --git a/docs/contributing/images/copy_url.png b/docs/contributing/images/copy_url.png
new file mode 100644
index 0000000..82df4ee
--- /dev/null
+++ b/docs/contributing/images/copy_url.png
Binary files differ
diff --git a/docs/contributing/images/fork_docker.png b/docs/contributing/images/fork_docker.png
new file mode 100644
index 0000000..88c6ed8
--- /dev/null
+++ b/docs/contributing/images/fork_docker.png
Binary files differ
diff --git a/docs/contributing/images/git_bash.png b/docs/contributing/images/git_bash.png
new file mode 100644
index 0000000..be2ec73
--- /dev/null
+++ b/docs/contributing/images/git_bash.png
Binary files differ
diff --git a/docs/contributing/images/list_example.png b/docs/contributing/images/list_example.png
new file mode 100644
index 0000000..2e3b59a
--- /dev/null
+++ b/docs/contributing/images/list_example.png
Binary files differ
diff --git a/docs/contributing/set-up-dev-env.md b/docs/contributing/set-up-dev-env.md
new file mode 100644
index 0000000..acd6888
--- /dev/null
+++ b/docs/contributing/set-up-dev-env.md
@@ -0,0 +1,321 @@
+### Work with a development container
+
+In this section, you learn to develop like the Moby Engine core team.
+The `moby/moby` repository includes a `Dockerfile` at its root. This file defines
+Moby's development environment. The `Dockerfile` lists the environment's
+dependencies: system libraries and binaries, Go environment, Go dependencies,
+etc.
+
+Moby's development environment is itself, ultimately a Docker container.
+You use the `moby/moby` repository and its `Dockerfile` to create a Docker image,
+run a Docker container, and develop code in the container.
+
+If you followed the procedures that <a href="/opensource/project/set-up-git/" target="_blank">
+set up Git for contributing</a>, you should have a fork of the `moby/moby`
+repository. You also created a branch called `dry-run-test`. In this section,
+you continue working with your fork on this branch.
+
+##  Task 1. Remove images and containers
+
+Moby developers run the latest stable release of the Docker software. They clean their local hosts of
+unnecessary Docker artifacts such as stopped containers or unused images.
+Cleaning unnecessary artifacts isn't strictly necessary, but it is good
+practice, so it is included here.
+
+To remove unnecessary artifacts:
+
+1. Verify that you have no unnecessary containers running on your host.
+
+   ```none
+   $ docker ps -a
+   ```
+
+   You should see something similar to the following:
+
+   ```none
+   CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS               NAMES
+   ```
+
+   There are no running or stopped containers on this host. A fast way to
+   remove old containers is the following:
+
+   You can now use the `docker system prune` command to achieve this:
+
+   ```none
+   $ docker system prune -a
+   ```
+
+   Older versions of the Docker Engine should reference the command below:
+
+   ```none
+   $ docker rm $(docker ps -a -q)
+   ```
+
+   This command uses `docker ps` to list all containers (`-a` flag) by numeric
+   IDs (`-q` flag). Then, the `docker rm` command removes the resulting list.
+   If you have running but unused containers, stop and then remove them with
+   the `docker stop` and `docker rm` commands.
+
+2. Verify that your host has no dangling images.
+
+   ```none
+   $ docker images
+   ```
+
+   You should see something similar to the following:
+
+   ```none
+   REPOSITORY          TAG                 IMAGE ID            CREATED             SIZE
+   ```
+
+   This host has no images. You may have one or more _dangling_ images. A
+   dangling image is not used by a running container and is not an ancestor of
+   another image on your system. A fast way to remove dangling images is
+   the following:
+
+   ```none
+   $ docker rmi -f $(docker images -q -a -f dangling=true)
+   ```
+
+   This command uses `docker images` to list all images (`-a` flag) by numeric
+   IDs (`-q` flag) and filter them to find dangling images (`-f dangling=true`).
+   Then, the `docker rmi` command forcibly (`-f` flag) removes
+   the resulting list. If you get a "docker: "rmi" requires a minimum of 1 argument."
+   message, that means there were no dangling images. To remove just one image, use the
+   `docker rmi ID` command.
+
+## Task 2. Start a development container
+
+If you followed the last procedure, your host is clean of unnecessary images and
+containers. In this section, you build an image from the Engine development
+environment and run it in the container. Both steps are automated for you by the
+Makefile in the Engine code repository. The first time you build an image, it
+can take over 15 minutes to complete.
+
+1. Open a terminal.
+
+   For [Docker Toolbox](../../toolbox/overview.md) users, use `docker-machine status your_vm_name` to make sure your VM is running. You
+   may need to run `eval "$(docker-machine env your_vm_name)"` to initialize your
+   shell environment. If you use Docker for Mac or Docker for Windows, you do not need
+   to use Docker Machine.
+
+2. Change into the root of the `moby-fork` repository.
+
+   ```none
+   $ cd ~/repos/moby-fork
+   ```
+
+   If you are following along with this guide, you created a `dry-run-test`
+   branch when you <a href="/opensource/project/set-up-git/" target="_blank">
+   set up Git for contributing</a>.
+
+3. Ensure you are on your `dry-run-test` branch.
+
+   ```none
+   $ git checkout dry-run-test
+   ```
+
+   If you get a message that the branch doesn't exist, add the `-b` flag (`git checkout -b dry-run-test`) so the
+   command both creates the branch and checks it out.
+
+4. Use `make` to build a development environment image and run it in a container.
+
+   ```none
+   $ make BIND_DIR=. shell
+   ```
+
+   Using the instructions in the
+   `Dockerfile`, the build may need to download and / or configure source and other images. On first build this process may take between 5 - 15 minutes to create an image. The command returns informational messages as it runs.  A
+   successful build returns a final message and opens a Bash shell into the
+   container.
+
+   ```none
+   Successfully built 3d872560918e
+   docker run --rm -i --privileged -e BUILDFLAGS -e KEEPBUNDLE -e DOCKER_BUILD_GOGC -e DOCKER_BUILD_PKGS -e DOCKER_CLIENTONLY -e DOCKER_DEBUG -e DOCKER_EXPERIMENTAL -e DOCKER_GITCOMMIT -e DOCKER_GRAPHDRIVER=devicemapper -e DOCKER_INCREMENTAL_BINARY -e DOCKER_REMAP_ROOT -e DOCKER_STORAGE_OPTS -e DOCKER_USERLANDPROXY -e TESTDIRS -e TESTFLAGS -e TIMEOUT -v "home/ubuntu/repos/docker/bundles:/go/src/github.com/moby/moby/bundles" -t "docker-dev:dry-run-test" bash
+   root@f31fa223770f:/go/src/github.com/moby/moby#
+   ```
+
+   At this point, your prompt reflects the container's BASH shell.
+
+5. List the contents of the current directory (`/go/src/github.com/moby/moby`).
+
+   You should see the image's source from the  `/go/src/github.com/moby/moby`
+   directory.
+
+   ![List example](images/list_example.png)
+
+6. Make a `dockerd` binary.
+
+   ```none
+   root@a8b2885ab900:/go/src/github.com/moby/moby# hack/make.sh binary
+   Removing bundles/
+
+   ---> Making bundle: binary (in bundles/binary)
+   Building: bundles/binary-daemon/dockerd-17.06.0-dev
+   Created binary: bundles/binary-daemon/dockerd-17.06.0-dev
+   Copying nested executables into bundles/binary-daemon
+
+   ```
+
+7. Run `make install`, which copies the binary to the container's
+   `/usr/local/bin/` directory.
+
+   ```none
+   root@a8b2885ab900:/go/src/github.com/moby/moby# make install
+   ```
+
+8. Start the Engine daemon running in the background.
+
+   ```none
+   root@a8b2885ab900:/go/src/github.com/docker/docker# dockerd -D &
+   ...output snipped...
+   DEBU[0001] Registering POST, /networks/{id:.*}/connect
+   DEBU[0001] Registering POST, /networks/{id:.*}/disconnect
+   DEBU[0001] Registering DELETE, /networks/{id:.*}
+   INFO[0001] API listen on /var/run/docker.sock
+   DEBU[0003] containerd connection state change: READY
+   ```
+
+   The `-D` flag starts the daemon in debug mode. The `&` starts it as a
+   background process. You'll find these options useful when debugging code
+   development. You will need to hit `return` in order to get back to your shell prompt.
+
+   > **Note**: The following command automates the `build`,
+   > `install`, and `run` steps above. Once the command below completes, hit `ctrl-z` to suspend the process, then run `bg 1` and hit `enter` to resume the daemon process in the background and get back to your shell prompt.
+
+   ```none
+   hack/make.sh binary install-binary run
+   ```
+
+9. Inside your container, check your Docker version.
+
+   ```none
+   root@5f8630b873fe:/go/src/github.com/moby/moby# docker --version
+   Docker version 1.12.0-dev, build 6e728fb
+   ```
+
+   Inside the container you are running a development version. This is the version
+   on the current branch. It reflects the value of the `VERSION` file at the
+   root of your `moby-fork` repository.
+
+10. Run the `hello-world` image.
+
+    ```none
+    root@5f8630b873fe:/go/src/github.com/moby/moby# docker run hello-world
+    ```
+
+11. List the image you just downloaded.
+
+    ```none
+    root@5f8630b873fe:/go/src/github.com/moby/moby# docker images
+	REPOSITORY   TAG     IMAGE ID      CREATED        SIZE
+	hello-world  latest  c54a2cc56cbb  3 months ago   1.85 kB
+    ```
+
+12. Open another terminal on your local host.
+
+13. List the container running your development container.
+
+    ```none
+    ubuntu@ubuntu1404:~$ docker ps
+    CONTAINER ID        IMAGE                     COMMAND             CREATED             STATUS              PORTS               NAMES
+    a8b2885ab900        docker-dev:dry-run-test   "hack/dind bash"    43 minutes ago      Up 43 minutes                           hungry_payne
+    ```
+
+    Notice that the tag on the container is marked with the `dry-run-test` branch name.
+
+
+## Task 3. Make a code change
+
+At this point, you have experienced the "Moby inception" technique. That is,
+you have:
+
+* forked and cloned the Moby Engine code repository
+* created a feature branch for development
+* created and started an Engine development container from your branch
+* built a binary inside of your development container
+* launched a `docker` daemon using your newly compiled binary
+* called the `docker` client to run a `hello-world` container inside
+  your development container
+
+Running the `make BIND_DIR=. shell` command mounted your local Docker repository source into
+your Docker container.
+
+   > **Note**: Inspecting the `Dockerfile` shows a `COPY . /go/src/github.com/docker/docker` instruction, suggesting that dynamic code changes will _not_ be reflected in the container. However, inspecting the `Makefile` shows that the current working directory _will_ be mounted via a `-v` volume mount.
+
+When you start to develop code though, you'll
+want to iterate code changes and builds inside the container. If you have
+followed this guide exactly, you have a bash shell running a development
+container.
+
+Try a simple code change and see it reflected in your container. For this
+example, you'll edit the help for the `attach` subcommand.
+
+1. If you don't have one, open a terminal in your local host.
+
+2. Make sure you are in your `moby-fork` repository.
+
+   ```none
+   $ pwd
+   /Users/mary/go/src/github.com/moxiegirl/moby-fork
+   ```
+
+   Your location should be different because, at least, your username is
+   different.
+
+3. Open the `cmd/dockerd/docker.go` file.
+
+4. Edit the command's help message.
+
+   For example, you can edit this line:
+
+   ```go
+   Short:         "A self-sufficient runtime for containers.",
+   ```
+
+   And change it to this:
+
+   ```go
+   Short:         "A self-sufficient and really fun runtime for containers.",
+   ```
+
+5. Save and close the `cmd/dockerd/docker.go` file.
+
+6. Go to your running docker development container shell.
+
+7. Rebuild the binary by using the command `hack/make.sh binary` in the docker development container shell.
+
+8. Stop Docker if it is running.
+
+9. Copy the binaries to **/usr/bin** by entering the following commands in the docker development container shell.
+
+   ```
+   hack/make.sh binary install-binary
+   ```
+
+10. To view your change, run the `dockerd --help` command in the docker development container shell.
+
+   ```bash
+   root@b0cb4f22715d:/go/src/github.com/moby/moby# dockerd --help
+
+   Usage:        dockerd COMMAND
+
+   A self-sufficient and really fun runtime for containers.
+
+   Options:
+   ...
+
+   ```
+
+You've just done the basic workflow for changing the Engine code base. You made
+your code changes in your feature branch. Then, you updated the binary in your
+development container and tried your change out. If you were making a bigger
+change, you might repeat or iterate through this flow several times.
+
+## Where to go next
+
+Congratulations, you have successfully achieved Moby inception. You've had a
+small experience of the development process. You've set up your development
+environment and verified almost all the essential processes you need to
+contribute. Of course, before you start contributing, [you'll need to learn one
+more piece of the development process, the test framework](test.md).
diff --git a/docs/contributing/set-up-git.md b/docs/contributing/set-up-git.md
new file mode 100644
index 0000000..f320c27
--- /dev/null
+++ b/docs/contributing/set-up-git.md
@@ -0,0 +1,280 @@
+### Configure Git for contributing
+
+Work through this page to configure Git and a repository you'll use throughout
+the Contributor Guide. The work you do further in the guide, depends on the work
+you do here.
+
+## Task 1. Fork and clone the Moby code
+
+Before contributing, you first fork the Moby code repository. A fork copies
+a repository at a particular point in time. GitHub tracks for you where a fork
+originates.
+
+As you make contributions, you change your fork's code. When you are ready,
+you make a pull request back to the original Docker repository. If you aren't
+familiar with this workflow, don't worry, this guide walks you through all the
+steps.
+
+To fork and clone Moby:
+
+1. Open a browser and log into GitHub with your account.
+
+2. Go to the <a href="https://github.com/moby/moby"
+target="_blank">moby/moby repository</a>.
+
+3. Click the "Fork" button in the upper right corner of the GitHub interface.
+
+    ![Branch Signature](images/fork_docker.png)
+
+    GitHub forks the repository to your GitHub account. The original
+    `moby/moby` repository becomes a new fork `YOUR_ACCOUNT/moby` under
+    your account.
+
+4. Copy your fork's clone URL from GitHub.
+
+    GitHub allows you to use HTTPS or SSH protocols for clones. You can use the
+    `git` command line or clients like Subversion to clone a repository.
+
+    ![Copy clone URL](images/copy_url.png)
+
+    This guide assumes you are using the HTTPS protocol and the `git` command
+    line. If you are comfortable with SSH and some other tool, feel free to use
+    that instead. You'll need to convert what you see in the guide to what is
+    appropriate to your tool.
+
+5. Open a terminal window on your local host and change to your home directory.
+
+   ```bash
+   $ cd ~
+   ```
+
+    In Windows, you'll work in your Docker Quickstart Terminal window instead of
+    Powershell or a `cmd` window.
+
+6. Create a `repos` directory.
+
+   ```bash
+   $ mkdir repos
+   ```
+
+7. Change into your `repos` directory.
+
+   ```bash
+   $ cd repos
+   ```
+
+8. Clone the fork to your local host into a repository called `moby-fork`.
+
+   ```bash
+   $ git clone https://github.com/moxiegirl/moby.git moby-fork
+   ```
+
+    Naming your local repo `moby-fork` should help make these instructions
+    easier to follow; experienced coders don't typically change the name.
+
+9. Change directory into your new `moby-fork` directory.
+
+   ```bash
+   $ cd moby-fork
+   ```
+
+    Take a moment to familiarize yourself with the repository's contents. List
+    the contents.
+
+## Task 2. Set your signature and an upstream remote
+
+When you contribute to Docker, you must certify you agree with the
+<a href="http://developercertificate.org/" target="_blank">Developer Certificate of Origin</a>.
+You indicate your agreement by signing your `git` commits like this:
+
+```
+Signed-off-by: Pat Smith <pat.smith@email.com>
+```
+
+To create a signature, you configure your username and email address in Git.
+You can set these globally or locally on just your `moby-fork` repository.
+You must sign with your real name. You can sign your git commit automatically
+with `git commit -s`. Moby does not accept anonymous contributions or contributions
+through pseudonyms.
+
+As you change code in your fork, you'll want to keep it in sync with the changes
+others make in the `moby/moby` repository. To make syncing easier, you'll
+also add a _remote_ called `upstream` that points to `moby/moby`. A remote
+is just another project version hosted on the internet or network.
+
+To configure your username, email, and add a remote:
+
+1. Change to the root of your `moby-fork` repository.
+
+   ```bash
+   $ cd moby-fork
+   ```
+
+2. Set your `user.name` for the repository.
+
+   ```bash
+   $ git config --local user.name "FirstName LastName"
+   ```
+
+3. Set your `user.email` for the repository.
+
+   ```bash
+   $ git config --local user.email "emailname@mycompany.com"
+   ```
+
+4. Set your local repo to track changes upstream, on the `moby/moby` repository.
+
+   ```bash
+   $ git remote add upstream https://github.com/moby/moby.git
+   ```
+
+5. Check the result in your `git` configuration.
+
+   ```bash
+   $ git config --local -l
+   core.repositoryformatversion=0
+   core.filemode=true
+   core.bare=false
+   core.logallrefupdates=true
+   remote.origin.url=https://github.com/moxiegirl/moby.git
+   remote.origin.fetch=+refs/heads/*:refs/remotes/origin/*
+   branch.master.remote=origin
+   branch.master.merge=refs/heads/master
+   user.name=Mary Anthony
+   user.email=mary@docker.com
+   remote.upstream.url=https://github.com/moby/moby.git
+   remote.upstream.fetch=+refs/heads/*:refs/remotes/upstream/*
+   ```
+
+    To list just the remotes, use:
+
+   ```bash
+   $ git remote -v
+   origin	https://github.com/moxiegirl/moby.git (fetch)
+   origin	https://github.com/moxiegirl/moby.git (push)
+   upstream https://github.com/moby/moby.git (fetch)
+   upstream https://github.com/moby/moby.git (push)
+   ```
+
+## Task 3. Create and push a branch
+
+As you change code in your fork, make your changes on a repository branch.
+The branch name should reflect what you are working on. In this section, you
+create a branch, make a change, and push it up to your fork.
+
+This branch is just for testing your config for this guide. The changes are part
+of a dry run, so the branch name will be dry-run-test. To create and push
+the branch to your fork on GitHub:
+
+1. Open a terminal and go to the root of your `moby-fork`.
+
+   ```bash
+   $ cd moby-fork
+   ```
+
+2. Create a `dry-run-test` branch.
+
+   ```bash
+   $ git checkout -b dry-run-test
+   ```
+
+    This command creates the branch and switches the repository to it.
+
+3. Verify you are in your new branch.
+
+   ```bash
+   $ git branch
+   * dry-run-test
+     master
+   ```
+
+    The current branch has an * (asterisk) marker. So, these results show you
+    are on the right branch.
+
+4. Create a `TEST.md` file in the repository's root.
+
+   ```bash
+   $ touch TEST.md
+   ```
+
+5. Edit the file and add your email and location.
+
+    ![Add your information](images/contributor-edit.png)
+
+    You can use any text editor you are comfortable with.
+
+6. Save and close the file.
+
+7. Check the status of your branch.
+
+   ```bash
+   $ git status
+   On branch dry-run-test
+   Untracked files:
+     (use "git add <file>..." to include in what will be committed)
+
+       TEST.md
+
+   nothing added to commit but untracked files present (use "git add" to track)
+   ```
+
+    You've only changed the one file. It is untracked so far by git.
+
+8. Add your file.
+
+   ```bash
+   $ git add TEST.md
+   ```
+
+    That is the only _staged_ file. Stage is a fancy word for work that Git is
+    tracking.
+
+9. Sign and commit your change.
+
+   ```bash
+   $ git commit -s -m "Making a dry run test."
+   [dry-run-test 6e728fb] Making a dry run test
+    1 file changed, 1 insertion(+)
+    create mode 100644 TEST.md
+   ```
+
+    Commit messages should have a short summary sentence of no more than 50
+    characters. Optionally, you can also include a more detailed explanation
+    after the summary. Separate the summary from any explanation with an empty
+    line.
+
+10. Push your changes to GitHub.
+
+    ```bash
+    $ git push --set-upstream origin dry-run-test
+    Username for 'https://github.com': moxiegirl
+    Password for 'https://moxiegirl@github.com':
+    ```
+
+    Git prompts you for your GitHub username and password. Then, the command
+    returns a result.
+
+    ```bash
+    Counting objects: 13, done.
+    Compressing objects: 100% (2/2), done.
+    Writing objects: 100% (3/3), 320 bytes | 0 bytes/s, done.
+    Total 3 (delta 1), reused 0 (delta 0)
+    To https://github.com/moxiegirl/moby.git
+     * [new branch]      dry-run-test -> dry-run-test
+    Branch dry-run-test set up to track remote branch dry-run-test from origin.
+    ```
+
+11. Open your browser to GitHub.
+
+12. Navigate to your Moby fork.
+
+13. Make sure the `dry-run-test` branch exists, that it has your commit, and the
+commit is signed.
+
+    ![Branch Signature](images/branch-sig.png)
+
+## Where to go next
+
+Congratulations, you have finished configuring both your local host environment
+and Git for contributing. In the next section you'll [learn how to set up and
+work in a Moby development container](set-up-dev-env.md).
diff --git a/docs/contributing/software-req-win.md b/docs/contributing/software-req-win.md
new file mode 100644
index 0000000..3be4327
--- /dev/null
+++ b/docs/contributing/software-req-win.md
@@ -0,0 +1,177 @@
+### Build and test Moby on Windows
+
+This page explains how to get the software you need to build, test, and run the
+Moby source code for Windows and set up the required software and services:
+
+- Windows containers
+- GitHub account
+- Git
+
+## Prerequisites
+
+### 1. Windows Server 2016 or Windows 10 with all Windows updates applied
+
+The major build number must be at least 14393. This can be confirmed, for example,
+by running the following from an elevated PowerShell prompt - this sample output
+is from a fully up to date machine as at mid-November 2016:
+
+
+    PS C:\> $(gin).WindowsBuildLabEx
+    14393.447.amd64fre.rs1_release_inmarket.161102-0100
+
+### 2. Git for Windows (or another git client) must be installed
+
+https://git-scm.com/download/win.
+
+### 3. The machine must be configured to run containers
+
+For example, by following the quick start guidance at
+https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md
+
+### 4. If building in a Hyper-V VM
+
+For Windows Server 2016 using Windows Server containers as the default option,
+it is recommended you have at least 1GB of memory assigned;
+For Windows 10 where Hyper-V Containers are employed, you should have at least
+4GB of memory assigned.
+Note also, to run Hyper-V containers in a VM, it is necessary to configure the VM
+for nested virtualization.
+
+## Usage
+
+The following steps should be run from an elevated Windows PowerShell prompt.
+
+>**Note**:  In a default installation of containers on Windows following the quick-start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start,
+the `docker.exe` client must run elevated to be able to connect to the daemon.
+
+### 1. Windows containers
+
+To test and run the Windows Moby engine, you need a system that supports Windows Containers:
+
+- Windows 10 Anniversary Edition
+- Windows Server 2016 running in a VM, on bare metal or in the cloud
+
+Check out the [getting started documentation](https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md) for details.
+
+### 2. GitHub account
+
+To contribute to the Moby project, you need a <a href="https://github.com" target="_blank">GitHub account</a>.
+A free account is fine. All the Moby project repositories are public and visible to everyone.
+
+This guide assumes that you have basic familiarity with Git and GitHub terminology
+and usage.
+Refer to [GitHub For Beginners: Don’t Get Scared, Get Started](http://readwrite.com/2013/09/30/understanding-github-a-journey-for-beginners-part-1/)
+to get up to speed on GitHub.
+
+### 3. Git
+
+In PowerShell, run:
+
+    Invoke-Webrequest "https://github.com/git-for-windows/git/releases/download/v2.7.2.windows.1/Git-2.7.2-64-bit.exe" -OutFile git.exe -UseBasicParsing
+    Start-Process git.exe -ArgumentList '/VERYSILENT /SUPPRESSMSGBOXES /CLOSEAPPLICATIONS /DIR=c:\git\' -Wait
+    setx /M PATH "$env:Path;c:\git\cmd"
+
+You are now ready to clone and build the Moby source code.
+
+### 4. Clone Moby
+
+In a new (to pick up the path change) PowerShell prompt, run:
+
+    git clone https://github.com/moby/moby
+    cd moby
+
+This clones the main Moby repository. Check out [Moby Project](https://mobyproject.org)
+to learn about the other software that powers the Moby platform.
+
+### 5. Build and run
+
+Create a builder-container with the Moby source code. You can change the source
+code on your system and rebuild any time:
+
+    docker build -t nativebuildimage -f .\Dockerfile.windows .
+    docker build -t nativebuildimage -f Dockerfile.windows -m 2GB .  # (if using Hyper-V containers)
+
+To build Moby, run:
+
+    $DOCKER_GITCOMMIT=(git rev-parse --short HEAD)
+    docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT nativebuildimage hack\make.ps1 -Binary
+    docker run --name binaries -e DOCKER_GITCOMMIT=$DOCKER_GITCOMMIT -m 2GB nativebuildimage hack\make.ps1 -Binary  # (if using Hyper-V containers)
+
+Copy out the resulting Windows Moby Engine binary to `dockerd.exe` in the
+current directory:
+
+    docker cp binaries:C:\go\src\github.com\moby\moby\bundles\docker.exe docker.exe
+    docker cp binaries:C:\go\src\github.com\moby\moby\bundles\dockerd.exe dockerd.exe
+
+To test it, stop the system Docker daemon and start the one you just built:
+
+    Stop-Service Docker
+    .\dockerd.exe -D
+
+The other make targets work too, to run unit tests try:
+`docker run --rm docker-builder sh -c 'cd /c/go/src/github.com/moby/moby; hack/make.sh test-unit'`.
+
+### 6. Remove the interim binaries container
+
+_(Optional)_
+
+    docker rm binaries
+
+### 7. Remove the image
+
+_(Optional)_
+
+It may be useful to keep this image around if you need to build multiple times.
+Then you can take advantage of the builder cache to have an image which has all
+the components required to build the binaries already installed.
+
+    docker rmi nativebuildimage
+
+## Validation
+
+The validation tests can only run directly on the host.
+This is because they calculate information from the git repo, but the .git directory
+is not passed into the image as it is excluded via `.dockerignore`.
+Run the following from a Windows PowerShell prompt (elevation is not required):
+(Note Go must be installed to run these tests)
+
+    hack\make.ps1 -DCO -PkgImports -GoFormat
+
+## Unit tests
+
+To run unit tests, ensure you have created the nativebuildimage above.
+Then run one of the following from an (elevated) Windows PowerShell prompt:
+
+    docker run --rm nativebuildimage hack\make.ps1 -TestUnit
+    docker run --rm -m 2GB nativebuildimage hack\make.ps1 -TestUnit  # (if using Hyper-V containers)
+
+To run unit tests and binary build, ensure you have created the nativebuildimage above.
+Then run one of the following from an (elevated) Windows PowerShell prompt:
+
+    docker run nativebuildimage hack\make.ps1 -All
+    docker run -m 2GB nativebuildimage hack\make.ps1 -All  # (if using Hyper-V containers)
+
+## Windows limitations
+
+Don't attempt to use a bind mount to pass a local directory as the bundles
+target directory.
+It does not work (golang attempts to follow a mapped folder incorrectly).
+Instead, use docker cp as per the example.
+
+`go.zip` is not removed from the image as it is used by the Windows CI servers
+to ensure the host and image are running consistent versions of go.
+
+Nanoserver support is a work in progress. Although the image will build if the
+`FROM` statement is updated, it will not work when running autogen through `hack\make.ps1`.
+It is suspected that the required GCC utilities (e.g. gcc, windres, windmc) silently
+quit due to the use of console hooks which are not available.
+
+The docker integration tests do not currently run in a container on Windows,
+predominantly due to Windows not supporting privileged mode, so anything using a volume would fail.
+They (along with the rest of the docker CI suite) can be run using
+https://github.com/jhowardmsft/docker-w2wCIScripts/blob/master/runCI/Invoke-DockerCI.ps1.
+
+## Where to go next
+
+In the next section, you'll [learn how to set up and configure Git for
+contributing to Moby](set-up-git.md).
diff --git a/docs/contributing/software-required.md b/docs/contributing/software-required.md
new file mode 100644
index 0000000..b14c6f9
--- /dev/null
+++ b/docs/contributing/software-required.md
@@ -0,0 +1,94 @@
+### Get the required software for Linux or macOS
+
+This page explains how to get the software you need to use a Linux or macOS
+machine for Moby development. Before you begin contributing you must have:
+
+* a GitHub account
+* `git`
+* `make`
+* `docker`
+
+You'll notice that `go`, the language that Moby is written in, is not listed.
+That's because you don't need it installed; Moby's development environment
+provides it for you. You'll learn more about the development environment later.
+
+## Task 1. Get a GitHub account
+
+To contribute to the Moby project, you will need a <a
+href="https://github.com" target="_blank">GitHub account</a>. A free account is
+fine. All the Moby project repositories are public and visible to everyone.
+
+You should also have some experience using both the GitHub application and `git`
+on the command line.
+
+## Task 2. Install git
+
+Install `git` on your local system. You can check if `git` is already on your
+system and properly installed with the following command:
+
+```bash
+$ git --version
+```
+
+This documentation is written using `git` version 2.2.2. Your version may be
+different depending on your OS.
+
+## Task 3. Install make
+
+Install `make`. You can check if `make` is on your system with the following
+command:
+
+```bash
+$ make -v
+```
+
+This documentation is written using GNU Make 3.81. Your version may be different
+depending on your OS.
+
+## Task 4. Install or upgrade Docker
+
+If you haven't already, install the Docker software using the
+<a href="https://docs.docker.com/engine/installation/" target="_blank">instructions for your operating system</a>.
+If you have an existing installation, check your version and make sure you have
+the latest Docker.
+
+To check if `docker` is already installed on Linux:
+
+```bash
+docker --version
+Docker version 17.10.0-ce, build f4ffd25
+```
+
+On macOS or Windows, you should have installed Docker for Mac or
+Docker for Windows.
+
+```bash
+$ docker --version
+Docker version 17.10.0-ce, build f4ffd25
+```
+
+## Tip for Linux users
+
+This guide assumes you have added your user to the `docker` group on your system.
+To check, list the group's contents:
+
+```
+$ getent group docker
+docker:x:999:ubuntu
+```
+
+If the command returns no matches, you have two choices. You can preface this
+guide's `docker` commands with `sudo` as you work. Alternatively, you can add
+your user to the `docker` group as follows:
+
+```bash
+$ sudo usermod -aG docker ubuntu
+```
+
+You must log out and log back in for this modification to take effect.
+
+
+## Where to go next
+
+In the next section, you'll [learn how to set up and configure Git for
+contributing to Moby](set-up-git.md).
diff --git a/docs/contributing/test.md b/docs/contributing/test.md
new file mode 100644
index 0000000..9a63a12
--- /dev/null
+++ b/docs/contributing/test.md
@@ -0,0 +1,234 @@
+### Run tests
+
+Contributing includes testing your changes. If you change the Moby code, you
+may need to add a new test or modify an existing test. Your contribution could
+even be adding tests to Moby. For this reason, you need to know a little
+about Moby's test infrastructure.
+
+This section describes tests you can run in the `dry-run-test` branch of your Moby
+fork. If you have followed along in this guide, you already have this branch.
+If you don't have this branch, you can create it or simply use another of your
+branches.
+
+## Understand how to test Moby
+
+Moby tests use the Go language's test framework. In this framework, files
+whose names end in `_test.go` contain test code; you'll find test files like
+this throughout the Moby repo. Use these files for inspiration when writing
+your own tests. For information on Go's test framework, see <a
+href="http://golang.org/pkg/testing/" target="_blank">Go's testing package
+documentation</a> and the <a href="http://golang.org/cmd/go/#hdr-Test_packages"
+target="_blank">go test help</a>.
+
+You are responsible for _unit testing_ your contribution when you add new or
+change existing Moby code. A unit test is a piece of code that invokes a
+single, small piece of code (_unit of work_) to verify the unit works as
+expected.
+
+Depending on your contribution, you may need to add _integration tests_. These
+are tests that combine two or more work units into one component. These work
+units each have unit tests and then, together, integration tests that test the
+interface between the components. The `integration` and `integration-cli`
+directories in the Docker repository contain integration test code.
+
+Testing is its own specialty. If you aren't familiar with testing techniques,
+there is a lot of information available to you on the Web. For now, you should
+understand that the Docker maintainers may ask you to write a new test or
+change an existing one.
+
+## Run tests on your local host
+
+Before submitting a pull request with a code change, you should run the entire
+Moby Engine test suite. The `Makefile` contains a target for the entire test
+suite, named `test`. Also, it contains several targets for
+testing:
+
+| Target                 | What this target does                          |
+| ---------------------- | ---------------------------------------------- |
+| `test`                 | Run the unit, integration, and docker-py tests |
+| `test-unit`            | Run just the unit tests                        |
+| `test-integration-cli` | Run the integration tests for the CLI          |
+| `test-docker-py`       | Run the tests for the Docker API client        |
+
+Running the entire test suite on your current repository can take over half an
+hour. To run the test suite, do the following:
+
+1.  Open a terminal on your local host.
+
+2.  Change to the root of your Docker repository.
+
+    ```bash
+    $ cd moby-fork
+    ```
+
+3.  Make sure you are in your development branch.
+
+    ```bash
+    $ git checkout dry-run-test
+    ```
+
+4.  Run the `make test` command.
+
+    ```bash
+    $ make test
+    ```
+
+    This command does several things, it creates a container temporarily for
+    testing. Inside that container, the `make`:
+
+    * creates a new binary
+    * cross-compiles all the binaries for the various operating systems
+    * runs all the tests in the system
+
+    It can take approximately one hour to run all the tests. The time depends
+    on your host performance. The default timeout is 60 minutes, which is
+    defined in `hack/make.sh` (`${TIMEOUT:=60m}`). You can modify the timeout
+    value on the basis of your host performance. When they complete
+    successfully, you see the output concludes with something like this:
+
+    ```none
+    Ran 68 tests in 79.135s
+    ```
+
+## Run targets inside a development container
+
+If you are working inside a development container, you use the
+`hack/make.sh` script to run tests. The `hack/make.sh` script doesn't
+have a single target that runs all the tests. Instead, you provide a single
+command line with multiple targets that does the same thing.
+
+Try this now.
+
+1.  Open a terminal and change to the `moby-fork` root.
+
+2.  Start a Moby development image.
+
+    If you are following along with this guide, you should have a
+    `dry-run-test` image.
+
+    ```bash
+    $ docker run --privileged --rm -ti -v `pwd`:/go/src/github.com/moby/moby dry-run-test /bin/bash
+    ```
+
+3.  Run the tests using the `hack/make.sh` script.
+
+    ```bash
+    root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit test-integration-cli test-docker-py
+    ```
+
+    The tests run just as they did within your local host.
+
+    Of course, you can also run a subset of these targets too. For example, to run
+    just the unit tests:
+
+    ```bash
+    root@5f8630b873fe:/go/src/github.com/moby/moby# hack/make.sh dynbinary binary cross test-unit
+    ```
+
+    Most test targets require that you build these precursor targets first:
+    `dynbinary binary cross`
+
+
+## Run unit tests
+
+We use golang standard [testing](https://golang.org/pkg/testing/)
+package or [gocheck](https://labix.org/gocheck) for our unit tests.
+
+You can use the `TESTDIRS` environment variable to run unit tests for
+a single package.
+
+```bash
+$ TESTDIRS='opts' make test-unit
+```
+
+You can also use the `TESTFLAGS` environment variable to run a single test. The
+flag's value is passed as arguments to the `go test` command. For example, from
+your local host you can run the `TestValidateIPAddress` test with this command:
+
+```bash
+$ TESTFLAGS='-test.run ^TestValidateIPAddress$' make test-unit
+```
+
+On unit tests, it's better to use `TESTFLAGS` in combination with
+`TESTDIRS` to make it quicker to run a specific test.
+
+```bash
+$ TESTDIRS='opts' TESTFLAGS='-test.run ^TestValidateIPAddress$' make test-unit
+```
+
+## Run integration tests
+
+We use [gocheck](https://labix.org/gocheck) for our integration-cli tests.
+You can use the `TESTFLAGS` environment variable to run a single test. The
+flag's value is passed as arguments to the `go test` command. For example, from
+your local host you can run the `TestBuild` test with this command:
+
+```bash
+$ TESTFLAGS='-check.f DockerSuite.TestBuild*' make test-integration-cli
+```
+
+To run the same test inside your Docker development container, you do this:
+
+```bash
+root@5f8630b873fe:/go/src/github.com/moby/moby# TESTFLAGS='-check.f TestBuild*' hack/make.sh binary test-integration-cli
+```
+
+## Test the Windows binary against a Linux daemon
+
+This explains how to test the Windows binary on a Windows machine set up as a
+development environment. The tests will be run against a daemon
+running on a remote Linux machine. You'll use **Git Bash** that came with the
+Git for Windows installation. **Git Bash**, just as it sounds, allows you to
+run a Bash terminal on Windows.
+
+1.  If you don't have one open already, start a Git Bash terminal.
+
+    ![Git Bash](images/git_bash.png)
+
+2.  Change to the `moby` source directory.
+
+    ```bash
+    $ cd /c/gopath/src/github.com/moby/moby
+    ```
+
+3.  Set `DOCKER_REMOTE_DAEMON` as follows:
+
+    ```bash
+    $ export DOCKER_REMOTE_DAEMON=1
+    ```
+
+4.  Set `DOCKER_TEST_HOST` to the `tcp://IP_ADDRESS:2376` value; substitute your
+    Linux machine's actual IP address. For example:
+
+    ```bash
+    $ export DOCKER_TEST_HOST=tcp://213.124.23.200:2376
+    ```
+
+5.  Make the binary and run the tests:
+
+    ```bash
+    $ hack/make.sh binary test-integration-cli
+    ```
+    Some tests are skipped on Windows for various reasons. You can see which
+    tests were skipped by re-running the make and passing in the
+   `TESTFLAGS='-test.v'` value. For example
+
+    ```bash
+    $ TESTFLAGS='-test.v' hack/make.sh binary test-integration-cli
+    ```
+
+    Should you wish to run a single test such as one with the name
+    'TestExample', you can pass in `TESTFLAGS='-check.f TestExample'`. For
+    example
+
+    ```bash
+    $ TESTFLAGS='-check.f TestExample' hack/make.sh binary test-integration-cli
+    ```
+
+You can now choose to make changes to the Moby source or the tests. If you
+make any changes, just run these commands again.
+
+## Where to go next
+
+Congratulations, you have successfully completed the basics you need to
+understand the Moby test framework.
diff --git a/docs/contributing/who-written-for.md b/docs/contributing/who-written-for.md
new file mode 100644
index 0000000..1431f42
--- /dev/null
+++ b/docs/contributing/who-written-for.md
@@ -0,0 +1,49 @@
+### README first
+
+This section of the documentation contains a guide for Moby project users who want to
+contribute code or documentation to the Moby Engine project. As a community, we
+share rules of behavior and interaction. Make sure you are familiar with the <a
+href="https://github.com/moby/moby/blob/master/CONTRIBUTING.md#docker-community-guidelines"
+target="_blank">community guidelines</a> before continuing.
+
+## Where and what you can contribute
+
+The Moby project consists of not just one but several repositories on GitHub.
+So, in addition to the `moby/moby` repository, there is the
+`containerd/containerd` repo, the `moby/buildkit` repo, and several more.
+Contribute to any of these and you contribute to the Moby project.
+
+Not all Moby repositories use the Go language. Also, each repository has its
+own focus area. So, if you are an experienced contributor, think about
+contributing to a Moby project repository that has a language or a focus area you are
+familiar with.
+
+If you are new to the open source community, to Moby, or to formal
+programming, you should start out contributing to the `moby/moby`
+repository. Why? Because this guide is written for that repository specifically.
+
+Finally, code or documentation isn't the only way to contribute. You can report
+an issue, add to discussions in our community channel, write a blog post, or
+take a usability test. You can even propose your own type of contribution.
+Right now we don't have a lot written about this yet, but feel free to open an issue
+to discuss other contributions.
+
+## How to use this guide
+
+This is written for the distracted, the overworked, the sloppy reader with fair
+`git` skills and a failing memory for the GitHub GUI. The guide attempts to
+explain how to use the Moby Engine development environment as precisely,
+predictably, and procedurally as possible.
+
+Users who are new to Engine development should start by setting up their
+environment. Then, they should try a simple code change. After that, you should
+find something to work on or propose a totally new change.
+
+If you are a programming prodigy, you still may find this documentation useful.
+Please feel free to skim past information you find obvious or boring.
+
+## How to get started
+
+Start by getting the software you require. If you are on Mac or Linux, go to
+[get the required software for Linux or macOS](software-required.md). If you are
+on Windows, see [get the required software for Windows](software-req-win.md).
diff --git a/docs/static_files/docker-logo-compressed.png b/docs/static_files/docker-logo-compressed.png
deleted file mode 100644
index 717d09d..0000000
--- a/docs/static_files/docker-logo-compressed.png
+++ /dev/null
Binary files differ
diff --git a/hack/Jenkins/W2L/postbuild.sh b/hack/Jenkins/W2L/postbuild.sh
deleted file mode 100644
index 662e2dc..0000000
--- a/hack/Jenkins/W2L/postbuild.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-set +x
-set +e 
-
-echo ""
-echo ""
-echo "---"
-echo "Now starting POST-BUILD steps"
-echo "---"
-echo ""
-
-echo INFO: Pointing to $DOCKER_HOST
-
-if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then
-	echo INFO: Removing containers...
-	! docker rm -vf $(docker ps -aq)
-fi
-
-# Remove all images which don't have docker or debian in the name
-if [ ! $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }' | wc -l) -eq 0 ]; then 
-	echo INFO: Removing images...
-	! docker rmi -f $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }') 
-fi
-
-# Kill off any instances of git, go and docker, just in case
-! taskkill -F -IM git.exe -T >& /dev/null
-! taskkill -F -IM go.exe -T >& /dev/null
-! taskkill -F -IM docker.exe -T >& /dev/null
-
-# Remove everything
-! cd /c/jenkins/gopath/src/github.com/docker/docker
-! rm -rfd * >& /dev/null
-! rm -rfd .* >& /dev/null
-
-echo INFO: Cleanup complete
-exit 0
\ No newline at end of file
diff --git a/hack/Jenkins/W2L/setup.sh b/hack/Jenkins/W2L/setup.sh
deleted file mode 100644
index a3d86b8..0000000
--- a/hack/Jenkins/W2L/setup.sh
+++ /dev/null
@@ -1,309 +0,0 @@
-# Jenkins CI script for Windows to Linux CI.
-# Heavily modified by John Howard (@jhowardmsft) December 2015 to try to make it more reliable.
-set +xe
-SCRIPT_VER="Wed Apr 20 18:30:19 UTC 2016"
-
-# TODO to make (even) more resilient: 
-#  - Wait for daemon to be running before executing docker commands
-#  - Check if jq is installed
-#  - Make sure bash is v4.3 or later. Can't do until all Azure nodes on the latest version
-#  - Make sure we are not running as local system. Can't do until all Azure nodes are updated.
-#  - Error if docker versions are not equal. Can't do until all Azure nodes are updated
-#  - Error if go versions are not equal. Can't do until all Azure nodes are updated.
-#  - Error if running 32-bit posix tools. Probably can take from bash --version and check contains "x86_64"
-#  - Warn if the CI directory cannot be deleted afterwards. Otherwise turdlets are left behind
-#  - Use %systemdrive% ($SYSTEMDRIVE) rather than hard code to c: for TEMP
-#  - Consider cross building the Windows binary and copy across. That's a bit of a heavy lift. Only reason
-#    for doing that is that it mirrors the actual release process for docker.exe which is cross-built.
-#    However, should absolutely not be a problem if built natively, so nit-picking.
-#  - Tidy up of images and containers. Either here, or in the teardown script.
-
-ec=0
-uniques=1
-echo INFO: Started at `date`. Script version $SCRIPT_VER
-
-
-# !README!
-# There are two daemons running on the remote Linux host:
-# 	- outer: specified by DOCKER_HOST, this is the daemon that will build and run the inner docker daemon
-#			from the sources matching the PR.
-#	- inner: runs on the host network, on a port number similar to that of DOCKER_HOST but the last two digits are inverted
-#			(2357 if DOCKER_HOST had port 2375; and 2367 if DOCKER_HOST had port 2376).
-#			The windows integration tests are run against this inner daemon.
-
-# get the ip, inner and outer ports.
-ip="${DOCKER_HOST#*://}"
-port_outer="${ip#*:}"
-# inner port is like outer port with last two digits inverted.
-port_inner=$(echo "$port_outer" | sed -E 's/(.)(.)$/\2\1/')
-ip="${ip%%:*}"
-
-echo "INFO: IP=$ip PORT_OUTER=$port_outer PORT_INNER=$port_inner"
-
-# If TLS is enabled
-if [ -n "$DOCKER_TLS_VERIFY" ]; then
-	protocol=https
-	if [ -z "$DOCKER_MACHINE_NAME" ]; then
-		ec=1
-		echo "ERROR: DOCKER_MACHINE_NAME is undefined"
-	fi
-	certs=$(echo ~/.docker/machine/machines/$DOCKER_MACHINE_NAME)
-	curlopts="--cacert $certs/ca.pem --cert $certs/cert.pem --key $certs/key.pem"
-	run_extra_args="-v tlscerts:/etc/docker"
-	daemon_extra_args="--tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem"
-else
-	protocol=http
-fi
-
-# Save for use by make.sh and scripts it invokes
-export MAIN_DOCKER_HOST="tcp://$ip:$port_inner"
-
-# Verify we can get the remote node to respond to _ping
-if [ $ec -eq 0 ]; then
-	reply=`curl -s $curlopts $protocol://$ip:$port_outer/_ping`
-	if [ "$reply" != "OK" ]; then
-		ec=1
-		echo "ERROR: Failed to get an 'OK' response from the docker daemon on the Linux node"
-		echo "       at $ip:$port_outer when called with an http request for '_ping'. This implies that"
-		echo "       either the daemon has crashed/is not running, or the Linux node is unavailable."
-		echo
-		echo "       A regular ping to the remote Linux node is below. It should reply. If not, the"
-		echo "       machine cannot be reached at all and may have crashed. If it does reply, it is"
-		echo "       likely a case of the Linux daemon not running or having crashed, which requires"
-		echo "       further investigation."
-		echo
-		echo "       Try re-running this CI job, or ask on #docker-dev or #docker-maintainers"
-		echo "       for someone to perform further diagnostics, or take this node out of rotation."
-		echo
-		ping $ip
-	else
-		echo "INFO: The Linux nodes outer daemon replied to a ping. Good!"
-	fi 
-fi
-
-# Get the version from the remote node. Note this may fail if jq is not installed.
-# That's probably worth checking to make sure, just in case.
-if [ $ec -eq 0 ]; then
-	remoteVersion=`curl -s $curlopts $protocol://$ip:$port_outer/version | jq -c '.Version'`
-	echo "INFO: Remote daemon is running docker version $remoteVersion"
-fi
-
-# Compare versions. We should really fail if result is no 1. Output at end of script.
-if [ $ec -eq 0 ]; then
-	uniques=`docker version | grep Version | /usr/bin/sort -u | wc -l`
-fi
-
-# Make sure we are in repo
-if [ $ec -eq 0 ]; then
-	if [ ! -d hack ]; then
-		echo "ERROR: Are you sure this is being launched from a the root of docker repository?"
-		echo "       If this is a Windows CI machine, it should be c:\jenkins\gopath\src\github.com\docker\docker."
-                echo "       Current directory is `pwd`"
-		ec=1
-	fi
-fi
-
-# Are we in split binary mode?
-if [ `grep DOCKER_CLIENTONLY Makefile | wc -l` -gt 0 ]; then
-    splitBinary=0
-	echo "INFO: Running in single binary mode"
-else
-    splitBinary=1
-	echo "INFO: Running in split binary mode"
-fi
-
-
-# Get the commit has and verify we have something
-if [ $ec -eq 0 ]; then
-	export COMMITHASH=$(git rev-parse --short HEAD)
-	echo INFO: Commit hash is $COMMITHASH
-	if [ -z $COMMITHASH ]; then
-		echo "ERROR: Failed to get commit hash. Are you sure this is a docker repository?"
-		ec=1
-	fi
-fi
-
-# Redirect to a temporary location. Check is here for local runs from Jenkins machines just in case not
-# in the right directory where the repo is cloned. We also redirect TEMP to not use the environment
-# TEMP as when running as a standard user (not local system), it otherwise exposes a bug in posix tar which
-# will cause CI to fail from Windows to Linux. Obviously it's not best practice to ever run as local system...
-if [ $ec -eq 0 ]; then
-	export TEMP=/c/CI/CI-$COMMITHASH
-	export TMP=$TEMP
-	/usr/bin/mkdir -p $TEMP  # Make sure Linux mkdir for -p
-fi
-
-# Tidy up time
-if [ $ec -eq 0 ]; then
-	echo INFO: Deleting pre-existing containers and images...
-    
-	# Force remove all containers based on a previously built image with this commit
-	! docker rm -f $(docker ps -aq --filter "ancestor=docker:$COMMITHASH") &>/dev/null
-    
-	# Force remove any container with this commithash as a name
-	! docker rm -f $(docker ps -aq --filter "name=docker-$COMMITHASH") &>/dev/null
-
-	# This SHOULD never happen, but just in case, also blow away any containers
-	# that might be around.
-	! if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then
-		echo WARN: There were some leftover containers. Cleaning them up.
-		! docker rm -f $(docker ps -aq)
-	fi
-	
-    # Force remove the image if it exists
-	! docker rmi -f "docker-$COMMITHASH" &>/dev/null
-fi
-
-# Provide the docker version for debugging purposes. If these fail, game over. 
-# as the Linux box isn't responding for some reason.
-if [ $ec -eq 0 ]; then
-	echo INFO: Docker version and info of the outer daemon on the Linux node
-	echo
-	docker version
-	ec=$?
-	if [ 0 -ne $ec ]; then
-		echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?"
-	fi
-	echo
-fi
-
-# Same as above, but docker info
-if [ $ec -eq 0 ]; then
-	echo
-	docker info
-	ec=$?
-	if [ 0 -ne $ec ]; then
-		echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?"
-	fi
-	echo
-fi
-
-# build the daemon image
-if [ $ec -eq 0 ]; then
-	echo "INFO: Running docker build on Linux host at $DOCKER_HOST"
-	if [ $splitBinary -eq 0 ]; then
-		set -x
-		docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" .
-    cat <<EOF | docker build --rm --force-rm -t "docker:$COMMITHASH" -
-FROM docker:$COMMITHASH
-RUN hack/make.sh binary
-RUN cp bundles/latest/binary/docker /bin/docker 
-CMD dockerd -D -H tcp://0.0.0.0:$port_inner $daemon_extra_args
-EOF
-	else
-		set -x
-		docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" .
-    cat <<EOF | docker build --rm --force-rm -t "docker:$COMMITHASH" -
-FROM docker:$COMMITHASH
-RUN hack/make.sh binary
-RUN cp bundles/latest/binary-daemon/dockerd /bin/dockerd 
-CMD dockerd -D -H tcp://0.0.0.0:$port_inner $daemon_extra_args
-EOF
-
-	fi
-	ec=$?
-	set +x
-	if [ 0 -ne $ec ]; then
-		echo "ERROR: docker build failed"
-	fi
-fi
-
-# Start the docker-in-docker daemon from the image we just built
-if [ $ec -eq 0 ]; then
-	echo "INFO: Starting build of a Linux daemon to test against, and starting it..."
-	set -x
-	# aufs in aufs is faster than vfs in aufs
-	docker run -d $run_extra_args -e DOCKER_GRAPHDRIVER=aufs --pid host --privileged --name "docker-$COMMITHASH" --net host "docker:$COMMITHASH"
-	ec=$?
-	set +x
-	if [ 0 -ne $ec ]; then
-	    	echo "ERROR: Failed to compile and start the linux daemon"
-	fi
-fi
-
-# Build locally.
-if [ $ec -eq 0 ]; then
-	echo "INFO: Starting local build of Windows binary..."
-	set -x
-	export TIMEOUT="120m"
-	export DOCKER_HOST="tcp://$ip:$port_inner"
-    # This can be removed
-	export DOCKER_TEST_HOST="tcp://$ip:$port_inner"
-	unset DOCKER_CLIENTONLY
-	export DOCKER_REMOTE_DAEMON=1
-	hack/make.sh binary 
-	ec=$?
-	set +x
-	if [ 0 -ne $ec ]; then
-	    echo "ERROR: Build of binary on Windows failed"
-	fi
-fi
-
-# Make a local copy of the built binary and ensure that is first in our path
-if [ $ec -eq 0 ]; then
-	VERSION=$(< ./VERSION)
-	if [ $splitBinary -eq 0 ]; then
-		cp bundles/$VERSION/binary/docker.exe $TEMP
-	else
-		cp bundles/$VERSION/binary-client/docker.exe $TEMP
-	fi
-	ec=$?
-	if [ 0 -ne $ec ]; then
-		echo "ERROR: Failed to copy built binary to $TEMP"
-	fi
-	export PATH=$TEMP:$PATH
-fi
-
-# Run the integration tests
-if [ $ec -eq 0 ]; then	
-	echo "INFO: Running Integration tests..."
-	set -x
-	export DOCKER_TEST_TLS_VERIFY="$DOCKER_TLS_VERIFY"
-	export DOCKER_TEST_CERT_PATH="$DOCKER_CERT_PATH"
-	#export TESTFLAGS='-check.vv'
-	hack/make.sh test-integration-cli
-	ec=$?
-	set +x
-	if [ 0 -ne $ec ]; then
-		echo "ERROR: CLI test failed."
-		# Next line is useful, but very long winded if included
-		docker -H=$MAIN_DOCKER_HOST logs --tail 100 "docker-$COMMITHASH"
-    fi
-fi
-
-# Tidy up any temporary files from the CI run
-if [ ! -z $COMMITHASH ]; then
-	rm -rf $TEMP
-fi
-
-# CI Integrity check - ensure we are using the same version of go as present in the Dockerfile
-GOVER_DOCKERFILE=`grep 'ENV GO_VERSION' Dockerfile | awk '{print $3}'`
-GOVER_INSTALLED=`go version | awk '{print $3}'`
-if [ "${GOVER_INSTALLED:2}" != "$GOVER_DOCKERFILE" ]; then
-	#ec=1  # Uncomment to make CI fail once all nodes are updated.
-	echo
-	echo "---------------------------------------------------------------------------"
-	echo "WARN: CI should be using go version $GOVER_DOCKERFILE, but is using ${GOVER_INSTALLED:2}"
-	echo "      Please ping #docker-maintainers on IRC to get this CI server updated."
-	echo "---------------------------------------------------------------------------"
-	echo
-fi
-
-# Check the Linux box is running a matching version of docker
-if [ "$uniques" -ne 1 ]; then
-    ec=0  # Uncomment to make CI fail once all nodes are updated.
-	echo
-	echo "---------------------------------------------------------------------------"
-	echo "ERROR: This CI node is not running the same version of docker as the daemon."
-	echo "       This is a CI configuration issue."
-	echo "---------------------------------------------------------------------------"
-	echo
-fi
-
-# Tell the user how we did.
-if [ $ec -eq 0 ]; then
-	echo INFO: Completed successfully at `date`. 
-else
-	echo ERROR: Failed with exitcode $ec at `date`.
-fi
-exit $ec
diff --git a/hack/Jenkins/readme.md b/hack/Jenkins/readme.md
deleted file mode 100644
index ace3f3f..0000000
--- a/hack/Jenkins/readme.md
+++ /dev/null
@@ -1,3 +0,0 @@
-These files under this directory are for reference only. 
-
-They are used by Jenkins for CI runs.
\ No newline at end of file
diff --git a/hack/ci/arm b/hack/ci/arm
new file mode 100755
index 0000000..e60332a
--- /dev/null
+++ b/hack/ci/arm
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+# Entrypoint for jenkins arm CI build
+set -eu -o pipefail
+
+hack/test/unit
+
+hack/make.sh \
+	binary-daemon \
+	dynbinary \
+	test-integration
diff --git a/hack/ci/experimental b/hack/ci/experimental
new file mode 100755
index 0000000..9ccbc84
--- /dev/null
+++ b/hack/ci/experimental
@@ -0,0 +1,9 @@
+#!/usr/bin/env bash
+# Entrypoint for jenkins experimental CI
+set -eu -o pipefail
+
+export DOCKER_EXPERIMENTAL=y
+
+hack/make.sh \
+	binary-daemon \
+	test-integration
diff --git a/hack/ci/janky b/hack/ci/janky
new file mode 100755
index 0000000..180f8c60
--- /dev/null
+++ b/hack/ci/janky
@@ -0,0 +1,13 @@
+#!/usr/bin/env bash
+# Entrypoint for jenkins janky CI build
+set -eu -o pipefail
+
+hack/validate/default
+hack/test/unit
+
+hack/make.sh \
+	binary-daemon \
+	dynbinary \
+	test-docker-py \
+	test-integration \
+	cross
diff --git a/hack/ci/powerpc b/hack/ci/powerpc
new file mode 100755
index 0000000..c36cf37
--- /dev/null
+++ b/hack/ci/powerpc
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+# Entrypoint for jenkins powerpc CI build
+set -eu -o pipefail
+
+hack/test/unit
+hack/make.sh dynbinary test-integration
diff --git a/hack/ci/z b/hack/ci/z
new file mode 100755
index 0000000..5ba868e
--- /dev/null
+++ b/hack/ci/z
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+# Entrypoint for jenkins s390x (z) CI build
+set -eu -o pipefail
+
+hack/test/unit
+hack/make.sh dynbinary test-integration
diff --git a/hack/dockerfile/binaries-commits b/hack/dockerfile/binaries-commits
index 3a1037c..2643f57 100644
--- a/hack/dockerfile/binaries-commits
+++ b/hack/dockerfile/binaries-commits
@@ -3,11 +3,11 @@
 TOMLV_COMMIT=9baf8a8a9f2ed20a8e54160840c492f937eeaf9a
 
 # When updating RUNC_COMMIT, also update runc in vendor.conf accordingly
-RUNC_COMMIT=3f2f8b84a77f73d38244dd690525642a72156c64
-CONTAINERD_COMMIT=06b9cb35161009dcb7123345749fef02f7cea8e0
+RUNC_COMMIT=0351df1c5a66838d0c392b4ac4cf9450de844e2d
+CONTAINERD_COMMIT=992280e8e265f491f7a624ab82f3e238be086e49
 TINI_COMMIT=949e6facb77383876aeff8a6944dde66b3089574
 LIBNETWORK_COMMIT=7b2b1feb1de4817d522cc372af149ff48d25028e
-VNDR_COMMIT=9909bb2b8a0b7ea464527b376dc50389c90df587
+VNDR_COMMIT=a6e196d8b4b0cbbdc29aebdb20c59ac6926bb384
 
 # Linting
-GOMETALINTER_COMMIT=f7b6e55301c9c67035003b7ba7f8a1cde532d338
+GOMETALINTER_COMMIT=bfcc1d6942136fd86eb6f1a6fb328de8398fbd80
diff --git a/hack/dockerfile/install-binaries.sh b/hack/dockerfile/install-binaries.sh
index 4debd3a..e97385e 100755
--- a/hack/dockerfile/install-binaries.sh
+++ b/hack/dockerfile/install-binaries.sh
@@ -32,7 +32,24 @@
 	git clone https://github.com/containerd/containerd.git "$GOPATH/src/github.com/containerd/containerd"
 	cd "$GOPATH/src/github.com/containerd/containerd"
 	git checkout -q "$CONTAINERD_COMMIT"
-	make $1
+	(
+		export GOPATH
+		make
+	)
+	cp bin/containerd /usr/local/bin/docker-containerd
+	cp bin/containerd-shim /usr/local/bin/docker-containerd-shim
+	cp bin/ctr /usr/local/bin/docker-containerd-ctr
+}
+
+install_containerd_static() {
+	echo "Install containerd version $CONTAINERD_COMMIT"
+	git clone https://github.com/containerd/containerd.git "$GOPATH/src/github.com/containerd/containerd"
+	cd "$GOPATH/src/github.com/containerd/containerd"
+	git checkout -q "$CONTAINERD_COMMIT"
+	(
+		export GOPATH
+		make EXTRA_FLAGS="-buildmode pie" EXTRA_LDFLAGS="-extldflags \\\"-fno-PIC -static\\\""
+	)
 	cp bin/containerd /usr/local/bin/docker-containerd
 	cp bin/containerd-shim /usr/local/bin/docker-containerd-shim
 	cp bin/ctr /usr/local/bin/docker-containerd-ctr
@@ -103,7 +120,7 @@
 			;;
 
 		containerd)
-			install_containerd static
+			install_containerd_static
 			;;
 
 		containerd-dynamic)
diff --git a/hack/integration-cli-on-swarm/agent/master/call.go b/hack/integration-cli-on-swarm/agent/master/call.go
index 858c2c0..dab9c67 100644
--- a/hack/integration-cli-on-swarm/agent/master/call.go
+++ b/hack/integration-cli-on-swarm/agent/master/call.go
@@ -73,14 +73,14 @@
 				}
 				log.Printf("Finished chunk %d [%d/%d] with %d test filters in %s, code=%d.",
 					chunkID, passed+failed, len(testChunks), len(tests),
-					time.Now().Sub(chunkBegin), result.Code)
+					time.Since(chunkBegin), result.Code)
 			}
 		}(chunkID, tests)
 	}
 	wg.Wait()
 	// TODO: print actual tests rather than chunks
 	log.Printf("Executed %d chunks in %s. PASS: %d, FAIL: %d.",
-		len(testChunks), time.Now().Sub(begin), passed, failed)
+		len(testChunks), time.Since(begin), passed, failed)
 	if failed > 0 {
 		return fmt.Errorf("%d chunks failed", failed)
 	}
@@ -103,7 +103,7 @@
 
 func executeTestChunkWithRetry(funkerName string, args types.Args) (types.Result, error) {
 	begin := time.Now()
-	for i := 0; time.Now().Sub(begin) < funkerRetryTimeout; i++ {
+	for i := 0; time.Since(begin) < funkerRetryTimeout; i++ {
 		result, err := executeTestChunk(funkerName, args)
 		if err == nil {
 			log.Printf("executeTestChunk(%q, %d) returned code %d in trial %d", funkerName, args.ChunkID, result.Code, i)
diff --git a/hack/integration-cli-on-swarm/agent/worker/worker.go b/hack/integration-cli-on-swarm/agent/worker/worker.go
index 36ab368..ea8bb3f 100644
--- a/hack/integration-cli-on-swarm/agent/worker/worker.go
+++ b/hack/integration-cli-on-swarm/agent/worker/worker.go
@@ -58,7 +58,7 @@
 				RawLog:  rawLog,
 			}
 		}
-		elapsed := time.Now().Sub(begin)
+		elapsed := time.Since(begin)
 		log.Printf("Finished chunk %d, code=%d, elapsed=%v", args.ChunkID, code, elapsed)
 		return types.Result{
 			ChunkID: args.ChunkID,
diff --git a/hack/make.ps1 b/hack/make.ps1
index 73c9577..3380a5b 100644
--- a/hack/make.ps1
+++ b/hack/make.ps1
@@ -114,12 +114,6 @@
     return $gitCommit
 }
 
-# Utility function to get get the current build version of docker
-Function Get-DockerVersion() {
-    if (-not (Test-Path ".\VERSION")) { Throw "VERSION file not found. Is this running from the root of a docker repository?" }
-    return $(Get-Content ".\VERSION" -raw).ToString().Replace("`n","").Trim()
-}
-
 # Utility function to determine if we are running in a container or not.
 # In Windows, we get this through an environment variable set in `Dockerfile.Windows`
 Function Check-InContainer() {
@@ -356,7 +350,7 @@
     if ($CommitSuffix -ne "") { $gitCommit += "-"+$CommitSuffix -Replace ' ', '' }
 
     # Get the version of docker (eg 17.04.0-dev)
-    $dockerVersion=Get-DockerVersion
+    $dockerVersion="0.0.0-dev"
 
     # Give a warning if we are not running in a container and are building binaries or running unit tests.
     # Not relevant for validation tests as these are fine to run outside of a container.
diff --git a/hack/make.sh b/hack/make.sh
index e52ece2..99c51a7 100755
--- a/hack/make.sh
+++ b/hack/make.sh
@@ -59,15 +59,13 @@
 	binary-daemon
 	dynbinary
 
-	test-unit
 	test-integration
 	test-docker-py
 
 	cross
-	tgz
 )
 
-VERSION=${VERSION:-$(< ./VERSION)}
+VERSION=${VERSION:-dev}
 ! BUILDTIME=$(date -u -d "@${SOURCE_DATE_EPOCH:-$(date +%s)}" --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/')
 if [ "$DOCKER_GITCOMMIT" ]; then
 	GITCOMMIT="$DOCKER_GITCOMMIT"
@@ -97,13 +95,6 @@
 	mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")"
 	ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}"
 	export GOPATH="${PWD}/.gopath"
-
-	if [ "$(go env GOOS)" = 'solaris' ]; then
-		# sys/unix is installed outside the standard library on solaris
-		# TODO need to allow for version change, need to get version from go
-		export GO_VERSION=${GO_VERSION:-"1.8.1"}
-		export GOPATH="${GOPATH}:/usr/lib/gocode/${GO_VERSION}"
-	fi
 fi
 
 if [ ! "$GOPATH" ]; then
@@ -190,20 +181,18 @@
 }
 
 main() {
-	# We want this to fail if the bundles already exist and cannot be removed.
-	# This is to avoid mixing bundles from different versions of the code.
-	mkdir -p bundles
-	if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then
-		echo "bundles/$VERSION already exists. Removing."
-		rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1
+	if [ -z "${KEEPBUNDLE-}" ]; then
+		echo "Removing bundles/"
+		rm -rf "bundles/*"
 		echo
 	fi
+	mkdir -p bundles
 
+	# Windows and symlinks don't get along well
 	if [ "$(go env GOHOSTOS)" != 'windows' ]; then
-		# Windows and symlinks don't get along well
-
 		rm -f bundles/latest
-		ln -s "$VERSION" bundles/latest
+		# preserve latest symlink for backward compatibility
+		ln -sf . bundles/latest
 	fi
 
 	if [ $# -lt 1 ]; then
@@ -212,7 +201,7 @@
 		bundles=($@)
 	fi
 	for bundle in ${bundles[@]}; do
-		export DEST="bundles/$VERSION/$(basename "$bundle")"
+		export DEST="bundles/$(basename "$bundle")"
 		# Cygdrive paths don't play well with go build -o.
 		if [[ "$(uname -s)" == CYGWIN* ]]; then
 			export DEST="$(cygpath -mw "$DEST")"
diff --git a/hack/make/.build-deb/compat b/hack/make/.build-deb/compat
deleted file mode 100644
index ec63514..0000000
--- a/hack/make/.build-deb/compat
+++ /dev/null
@@ -1 +0,0 @@
-9
diff --git a/hack/make/.build-deb/control b/hack/make/.build-deb/control
deleted file mode 100644
index 0f54399..0000000
--- a/hack/make/.build-deb/control
+++ /dev/null
@@ -1,29 +0,0 @@
-Source: docker-engine
-Section: admin
-Priority: optional
-Maintainer: Docker <support@docker.com>
-Standards-Version: 3.9.6
-Homepage: https://dockerproject.org
-Vcs-Browser: https://github.com/docker/docker
-Vcs-Git: git://github.com/docker/docker.git
-
-Package: docker-engine
-Architecture: linux-any
-Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends}
-Recommends: aufs-tools,
-            ca-certificates,
-            cgroupfs-mount | cgroup-lite,
-            git,
-            xz-utils,
-            ${apparmor:Recommends}
-Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs
-Description: Docker: the open-source application container engine
- Docker is an open source project to build, ship and run any application as a
- lightweight container
- .
- Docker containers are both hardware-agnostic and platform-agnostic. This means
- they can run anywhere, from your laptop to the largest EC2 compute instance and
- everything in between - and they don't require you to use a particular
- language, framework or packaging system. That makes them great building blocks
- for deploying and scaling web apps, databases, and backend services without
- depending on a particular stack or provider.
diff --git a/hack/make/.build-deb/docker-engine.bash-completion b/hack/make/.build-deb/docker-engine.bash-completion
deleted file mode 100644
index 6ea1119..0000000
--- a/hack/make/.build-deb/docker-engine.bash-completion
+++ /dev/null
@@ -1 +0,0 @@
-contrib/completion/bash/docker
diff --git a/hack/make/.build-deb/docker-engine.docker.default b/hack/make/.build-deb/docker-engine.docker.default
deleted file mode 120000
index 4278533..0000000
--- a/hack/make/.build-deb/docker-engine.docker.default
+++ /dev/null
@@ -1 +0,0 @@
-../../../contrib/init/sysvinit-debian/docker.default
\ No newline at end of file
diff --git a/hack/make/.build-deb/docker-engine.docker.init b/hack/make/.build-deb/docker-engine.docker.init
deleted file mode 120000
index 8cb89d3..0000000
--- a/hack/make/.build-deb/docker-engine.docker.init
+++ /dev/null
@@ -1 +0,0 @@
-../../../contrib/init/sysvinit-debian/docker
\ No newline at end of file
diff --git a/hack/make/.build-deb/docker-engine.docker.upstart b/hack/make/.build-deb/docker-engine.docker.upstart
deleted file mode 120000
index 7e1b64a..0000000
--- a/hack/make/.build-deb/docker-engine.docker.upstart
+++ /dev/null
@@ -1 +0,0 @@
-../../../contrib/init/upstart/docker.conf
\ No newline at end of file
diff --git a/hack/make/.build-deb/docker-engine.install b/hack/make/.build-deb/docker-engine.install
deleted file mode 100644
index dc6b25f..0000000
--- a/hack/make/.build-deb/docker-engine.install
+++ /dev/null
@@ -1,12 +0,0 @@
-#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/
-#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/
-#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/
-contrib/*-integration usr/share/docker-engine/contrib/
-contrib/check-config.sh usr/share/docker-engine/contrib/
-contrib/completion/fish/docker.fish usr/share/fish/vendor_completions.d/
-contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/
-contrib/init/systemd/docker.service lib/systemd/system/
-contrib/init/systemd/docker.socket lib/systemd/system/
-contrib/mk* usr/share/docker-engine/contrib/
-contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/
-contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/
diff --git a/hack/make/.build-deb/docker-engine.manpages b/hack/make/.build-deb/docker-engine.manpages
deleted file mode 100644
index 1aa6218..0000000
--- a/hack/make/.build-deb/docker-engine.manpages
+++ /dev/null
@@ -1 +0,0 @@
-man/man*/*
diff --git a/hack/make/.build-deb/docker-engine.postinst b/hack/make/.build-deb/docker-engine.postinst
deleted file mode 100644
index eeef6ca..0000000
--- a/hack/make/.build-deb/docker-engine.postinst
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh
-set -e
-
-case "$1" in
-	configure)
-		if [ -z "$2" ]; then
-			if ! getent group docker > /dev/null; then
-				groupadd --system docker
-			fi
-		fi
-		;;
-	abort-*)
-		# How'd we get here??
-		exit 1
-		;;
-	*)
-		;;
-esac
-
-#DEBHELPER#
diff --git a/hack/make/.build-deb/docker-engine.udev b/hack/make/.build-deb/docker-engine.udev
deleted file mode 120000
index 914a361..0000000
--- a/hack/make/.build-deb/docker-engine.udev
+++ /dev/null
@@ -1 +0,0 @@
-../../../contrib/udev/80-docker.rules
\ No newline at end of file
diff --git a/hack/make/.build-deb/docs b/hack/make/.build-deb/docs
deleted file mode 100644
index b43bf86..0000000
--- a/hack/make/.build-deb/docs
+++ /dev/null
@@ -1 +0,0 @@
-README.md
diff --git a/hack/make/.build-deb/rules b/hack/make/.build-deb/rules
deleted file mode 100755
index 19557ed..0000000
--- a/hack/make/.build-deb/rules
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/make -f
-
-VERSION = $(shell cat VERSION)
-SYSTEMD_VERSION := $(shell dpkg-query -W -f='$${Version}\n' systemd | cut -d- -f1)
-SYSTEMD_GT_227 := $(shell [ '$(SYSTEMD_VERSION)' ] && [ '$(SYSTEMD_VERSION)' -gt 227 ] && echo true )
-
-override_dh_gencontrol:
-	# if we're on Ubuntu, we need to Recommends: apparmor
-	echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars
-	dh_gencontrol
-
-override_dh_auto_build:
-	./hack/make.sh dynbinary
-	# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here
-
-override_dh_auto_test:
-	./bundles/$(VERSION)/dynbinary-daemon/dockerd -v
-
-override_dh_strip:
-	# Go has lots of problems with stripping, so just don't
-
-override_dh_auto_install:
-	mkdir -p debian/docker-engine/usr/bin
-	cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/dockerd)" debian/docker-engine/usr/bin/dockerd
-	cp -aT /usr/local/bin/docker-proxy debian/docker-engine/usr/bin/docker-proxy
-	cp -aT /usr/local/bin/docker-containerd debian/docker-engine/usr/bin/docker-containerd
-	cp -aT /usr/local/bin/docker-containerd-shim debian/docker-engine/usr/bin/docker-containerd-shim
-	cp -aT /usr/local/bin/docker-containerd-ctr debian/docker-engine/usr/bin/docker-containerd-ctr
-	cp -aT /usr/local/bin/docker-runc debian/docker-engine/usr/bin/docker-runc
-	cp -aT /usr/local/bin/docker-init debian/docker-engine/usr/bin/docker-init
-	mkdir -p debian/docker-engine/usr/lib/docker
-
-override_dh_installinit:
-	# use "docker" as our service name, not "docker-engine"
-	dh_installinit --name=docker
-ifeq (true, $(SYSTEMD_GT_227))
-	$(warning "Setting TasksMax=infinity")
-	sed -i -- 's/#TasksMax=infinity/TasksMax=infinity/' debian/docker-engine/lib/systemd/system/docker.service
-endif
-
-override_dh_installudev:
-	# match our existing priority
-	dh_installudev --priority=z80
-
-override_dh_install:
-	dh_install
-	dh_apparmor --profile-name=docker-engine -pdocker-engine
-
-override_dh_shlibdeps:
-	dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info
-
-%:
-	dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd)
diff --git a/hack/make/.build-rpm/docker-engine-selinux.spec b/hack/make/.build-rpm/docker-engine-selinux.spec
deleted file mode 100644
index 6a4b6c0..0000000
--- a/hack/make/.build-rpm/docker-engine-selinux.spec
+++ /dev/null
@@ -1,99 +0,0 @@
-# Some bits borrowed from the openstack-selinux package
-Name: docker-engine-selinux
-Version: %{_version}
-Release: %{_release}%{?dist}
-Summary: SELinux Policies for the open-source application container engine
-BuildArch: noarch
-Group: Tools/Docker
-
-License: GPLv2
-Source: %{name}.tar.gz
-
-URL: https://dockerproject.org
-Vendor: Docker
-Packager: Docker <support@docker.com>
-
-%global selinux_policyver 3.13.1-102
-%if 0%{?oraclelinux} >= 7
-%global selinux_policyver 3.13.1-102.0.3.el7_3.15
-%endif # oraclelinux 7
-%global selinuxtype targeted
-%global moduletype  services
-%global modulenames docker
-
-Requires(post): selinux-policy-base >= %{selinux_policyver}, selinux-policy-targeted >= %{selinux_policyver}, policycoreutils, policycoreutils-python libselinux-utils
-BuildRequires: selinux-policy selinux-policy-devel
-
-# conflicting packages
-Conflicts: docker-selinux
-
-# Usage: _format var format
-#   Expand 'modulenames' into various formats as needed
-#   Format must contain '$x' somewhere to do anything useful
-%global _format() export %1=""; for x in %{modulenames}; do %1+=%2; %1+=" "; done;
-
-# Relabel files
-%global relabel_files() \
-    /sbin/restorecon -R %{_bindir}/docker %{_localstatedir}/run/docker.sock %{_localstatedir}/run/docker.pid %{_sysconfdir}/docker %{_localstatedir}/log/docker %{_localstatedir}/log/lxc %{_localstatedir}/lock/lxc %{_usr}/lib/systemd/system/docker.service /root/.docker &> /dev/null || : \
-
-%description
-SELinux policy modules for use with Docker
-
-%prep
-%if 0%{?centos} <= 6
-%setup -n %{name}
-%else
-%autosetup -n %{name}
-%endif
-
-%build
-make SHARE="%{_datadir}" TARGETS="%{modulenames}"
-
-%install
-
-# Install SELinux interfaces
-%_format INTERFACES $x.if
-install -d %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype}
-install -p -m 644 $INTERFACES %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype}
-
-# Install policy modules
-%_format MODULES $x.pp.bz2
-install -d %{buildroot}%{_datadir}/selinux/packages
-install -m 0644 $MODULES %{buildroot}%{_datadir}/selinux/packages
-
-%post
-#
-# Install all modules in a single transaction
-#
-if [ $1 -eq 1 ]; then
-    %{_sbindir}/setsebool -P -N virt_use_nfs=1 virt_sandbox_use_all_caps=1
-fi
-%_format MODULES %{_datadir}/selinux/packages/$x.pp.bz2
-%{_sbindir}/semodule -n -s %{selinuxtype} -i $MODULES
-if %{_sbindir}/selinuxenabled ; then
-    %{_sbindir}/load_policy
-    %relabel_files
-    if [ $1 -eq 1 ]; then
-      restorecon -R %{_sharedstatedir}/docker
-    fi
-fi
-
-%postun
-if [ $1 -eq 0 ]; then
-    %{_sbindir}/semodule -n -r %{modulenames} &> /dev/null || :
-    if %{_sbindir}/selinuxenabled ; then
-        %{_sbindir}/load_policy
-        %relabel_files
-    fi
-fi
-
-%files
-%doc LICENSE
-%defattr(-,root,root,0755)
-%attr(0644,root,root) %{_datadir}/selinux/packages/*.pp.bz2
-%attr(0644,root,root) %{_datadir}/selinux/devel/include/%{moduletype}/*.if
-
-%changelog
-* Tue Dec 1 2015 Jessica Frazelle <acidburn@docker.com> 1.9.1-1
-- add licence to rpm
-- add selinux-policy and docker-engine-selinux rpm
diff --git a/hack/make/.build-rpm/docker-engine.spec b/hack/make/.build-rpm/docker-engine.spec
deleted file mode 100644
index 6225bb7..0000000
--- a/hack/make/.build-rpm/docker-engine.spec
+++ /dev/null
@@ -1,249 +0,0 @@
-Name: docker-engine
-Version: %{_version}
-Release: %{_release}%{?dist}
-Summary: The open-source application container engine
-Group: Tools/Docker
-
-License: ASL 2.0
-Source: %{name}.tar.gz
-
-URL: https://dockerproject.org
-Vendor: Docker
-Packager: Docker <support@docker.com>
-
-# is_systemd conditional
-%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210
-%global is_systemd 1
-%endif
-
-# required packages for build
-# most are already in the container (see contrib/builder/rpm/ARCH/generate.sh)
-# only require systemd on those systems
-%if 0%{?is_systemd}
-%if 0%{?suse_version} >= 1210
-BuildRequires: systemd-rpm-macros
-%{?systemd_requires}
-%else
-%if 0%{?fedora} >= 25
-# Systemd 230 and up no longer have libsystemd-journal (see https://bugzilla.redhat.com/show_bug.cgi?id=1350301)
-BuildRequires: pkgconfig(systemd)
-Requires: systemd-units
-%else
-BuildRequires: pkgconfig(systemd)
-Requires: systemd-units
-BuildRequires: pkgconfig(libsystemd-journal)
-%endif
-%endif
-%else
-Requires(post): chkconfig
-Requires(preun): chkconfig
-# This is for /sbin/service
-Requires(preun): initscripts
-%endif
-
-# required packages on install
-Requires: /bin/sh
-Requires: iptables
-%if !0%{?suse_version}
-Requires: libcgroup
-%else
-Requires: libcgroup1
-%endif
-Requires: tar
-Requires: xz
-%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 || 0%{?amzn} >= 1
-# Resolves: rhbz#1165615
-Requires: device-mapper-libs >= 1.02.90-1
-%endif
-%if 0%{?oraclelinux} >= 6
-# Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper
-Requires: kernel-uek >= 4.1
-Requires: device-mapper >= 1.02.90-2
-%endif
-
-# docker-selinux conditional
-%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
-%global with_selinux 1
-%endif
-
-# DWZ problem with multiple golang binary, see bug
-# https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12
-%if 0%{?fedora} >= 20 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
-%global _dwz_low_mem_die_limit 0
-%endif
-
-# start if with_selinux
-%if 0%{?with_selinux}
-
-%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?fedora} >= 25
-Requires: container-selinux >= 2.9
-%endif# centos 7, rhel 7, fedora 25
-
-%if 0%{?oraclelinux} >= 7
-%global selinux_policyver 3.13.1-102.0.3.el7_3.15
-%endif # oraclelinux 7
-%if 0%{?fedora} == 24
-%global selinux_policyver 3.13.1-191
-%endif # fedora 24 -- container-selinux on fedora24 does not properly set dockerd, for now just carry docker-engine-selinux for it
-%if 0%{?oraclelinux} >= 7 || 0%{?fedora} == 24
-Requires: selinux-policy >= %{selinux_policyver}
-Requires(pre): %{name}-selinux >= %{version}-%{release}
-%endif # selinux-policy for oraclelinux-7, fedora-24
-
-%endif # with_selinux
-
-# conflicting packages
-Conflicts: docker
-Conflicts: docker-io
-Conflicts: docker-engine-cs
-
-%description
-Docker is an open source project to build, ship and run any application as a
-lightweight container.
-
-Docker containers are both hardware-agnostic and platform-agnostic. This means
-they can run anywhere, from your laptop to the largest EC2 compute instance and
-everything in between - and they don't require you to use a particular
-language, framework or packaging system. That makes them great building blocks
-for deploying and scaling web apps, databases, and backend services without
-depending on a particular stack or provider.
-
-%prep
-%if 0%{?centos} <= 6 || 0%{?oraclelinux} <=6
-%setup -n %{name}
-%else
-%autosetup -n %{name}
-%endif
-
-%build
-export DOCKER_GITCOMMIT=%{_gitcommit}
-./hack/make.sh dynbinary
-# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here
-
-%check
-./bundles/%{_origversion}/dynbinary-daemon/dockerd -v
-
-%install
-# install binary
-install -d $RPM_BUILD_ROOT/%{_bindir}
-install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/dockerd-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/dockerd
-
-# install proxy
-install -p -m 755 /usr/local/bin/docker-proxy $RPM_BUILD_ROOT/%{_bindir}/docker-proxy
-
-# install containerd
-install -p -m 755 /usr/local/bin/docker-containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd
-install -p -m 755 /usr/local/bin/docker-containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim
-install -p -m 755 /usr/local/bin/docker-containerd-ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr
-
-# install runc
-install -p -m 755 /usr/local/bin/docker-runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc
-
-# install tini
-install -p -m 755 /usr/local/bin/docker-init $RPM_BUILD_ROOT/%{_bindir}/docker-init
-
-# install udev rules
-install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d
-install -p -m 644 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules
-
-# add init scripts
-install -d $RPM_BUILD_ROOT/etc/sysconfig
-install -d $RPM_BUILD_ROOT/%{_initddir}
-
-
-%if 0%{?is_systemd}
-install -d $RPM_BUILD_ROOT/%{_unitdir}
-install -p -m 644 contrib/init/systemd/docker.service.rpm $RPM_BUILD_ROOT/%{_unitdir}/docker.service
-%else
-install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker
-install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker
-%endif
-# add bash, zsh, and fish completions
-install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions
-install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions
-install -d $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d
-install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker
-install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker
-install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d/docker.fish
-
-# install manpages
-install -d %{buildroot}%{_mandir}/man1
-install -p -m 644 man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1
-install -d %{buildroot}%{_mandir}/man5
-install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5
-install -d %{buildroot}%{_mandir}/man8
-install -p -m 644 man/man8/*.8 $RPM_BUILD_ROOT/%{_mandir}/man8
-
-# add vimfiles
-install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc
-install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect
-install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax
-install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt
-install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim
-install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim
-
-# add nano
-install -d $RPM_BUILD_ROOT/usr/share/nano
-install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc
-
-# list files owned by the package here
-%files
-%doc AUTHORS CHANGELOG.md CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md
-/%{_bindir}/docker
-/%{_bindir}/dockerd
-/%{_bindir}/docker-containerd
-/%{_bindir}/docker-containerd-shim
-/%{_bindir}/docker-containerd-ctr
-/%{_bindir}/docker-proxy
-/%{_bindir}/docker-runc
-/%{_bindir}/docker-init
-/%{_sysconfdir}/udev/rules.d/80-docker.rules
-%if 0%{?is_systemd}
-/%{_unitdir}/docker.service
-%else
-%config(noreplace,missingok) /etc/sysconfig/docker
-/%{_initddir}/docker
-%endif
-/usr/share/bash-completion/completions/docker
-/usr/share/zsh/vendor-completions/_docker
-/usr/share/fish/vendor_completions.d/docker.fish
-%doc
-/%{_mandir}/man1/*
-/%{_mandir}/man5/*
-/%{_mandir}/man8/*
-/usr/share/vim/vimfiles/doc/dockerfile.txt
-/usr/share/vim/vimfiles/ftdetect/dockerfile.vim
-/usr/share/vim/vimfiles/syntax/dockerfile.vim
-/usr/share/nano/Dockerfile.nanorc
-
-%post
-%if 0%{?is_systemd}
-%systemd_post docker
-%else
-# This adds the proper /etc/rc*.d links for the script
-/sbin/chkconfig --add docker
-%endif
-if ! getent group docker > /dev/null; then
-    groupadd --system docker
-fi
-
-%preun
-%if 0%{?is_systemd}
-%systemd_preun docker
-%else
-if [ $1 -eq 0 ] ; then
-    /sbin/service docker stop >/dev/null 2>&1
-    /sbin/chkconfig --del docker
-fi
-%endif
-
-%postun
-%if 0%{?is_systemd}
-%systemd_postun_with_restart docker
-%else
-if [ "$1" -ge "1" ] ; then
-    /sbin/service docker condrestart >/dev/null 2>&1 || :
-fi
-%endif
-
-%changelog
diff --git a/hack/make/.detect-daemon-osarch b/hack/make/.detect-daemon-osarch
index ac16055..26cb30f 100644
--- a/hack/make/.detect-daemon-osarch
+++ b/hack/make/.detect-daemon-osarch
@@ -60,9 +60,6 @@
 			windows)
 				DOCKERFILE='Dockerfile.windows'
 				;;
-			solaris)
-				DOCKERFILE='Dockerfile.solaris'
-				;;
 		esac
 		;;
 	*)
diff --git a/hack/make/.go-autogen b/hack/make/.go-autogen
index ec20180..b68e3a7 100644
--- a/hack/make/.go-autogen
+++ b/hack/make/.go-autogen
@@ -17,6 +17,7 @@
 	Version            string = "$VERSION"
 	BuildTime          string = "$BUILDTIME"
 	IAmStatic          string = "${IAMSTATIC:-true}"
+	ContainerdCommitID string = "${CONTAINERD_COMMIT}"
 )
 
 // AUTOGENERATED FILE; see /go/src/github.com/docker/docker/hack/make/.go-autogen
@@ -31,9 +32,8 @@
 // Default build-time variable for library-import.
 // This file is overridden on build with build-time informations.
 const (
-	ContainerdCommitID string = "${CONTAINERD_COMMIT}"
-	RuncCommitID       string = "${RUNC_COMMIT}"
-	InitCommitID       string = "${TINI_COMMIT}"
+	RuncCommitID string = "${RUNC_COMMIT}"
+	InitCommitID string = "${TINI_COMMIT}"
 )
 
 // AUTOGENERATED FILE; see /go/src/github.com/docker/docker/hack/make/.go-autogen
diff --git a/hack/make/.integration-test-helpers b/hack/make/.integration-test-helpers
index e3cb7d8..abd1d0f 100644
--- a/hack/make/.integration-test-helpers
+++ b/hack/make/.integration-test-helpers
@@ -5,8 +5,10 @@
 #
 #     TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration
 #
-
-source "$SCRIPTDIR/make/.go-autogen"
+if [ -z $MAKEDIR ]; then
+	export MAKEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+fi
+source "$MAKEDIR/.go-autogen"
 
 # Set defaults
 : ${TEST_REPEAT:=1}
@@ -15,7 +17,7 @@
 
 integration_api_dirs=${TEST_INTEGRATION_DIR:-"$(
 	find ./integration -type d |
-	grep -vE '^(./integration$|./integration/util)')"}
+	grep -vE '(^./integration($|/util)|/testdata)')"}
 
 run_test_integration() {
 	[[ "$TESTFLAGS" != *-check.f* ]] && run_test_integration_suites
diff --git a/hack/make/build-deb b/hack/make/build-deb
deleted file mode 100644
index a698323..0000000
--- a/hack/make/build-deb
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# subshell so that we can export PATH and TZ without breaking other things
-(
-	export TZ=UTC # make sure our "date" variables are UTC-based
-	bundle .integration-daemon-start
-	bundle .detect-daemon-osarch
-
-	# TODO consider using frozen images for the dockercore/builder-deb tags
-
-	tilde='~' # ouch Bash 4.2 vs 4.3, you keel me
-	debVersion="${VERSION//-/$tilde}" # using \~ or '~' here works in 4.3, but not 4.2; just ~ causes $HOME to be inserted, hence the $tilde
-	# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
-	if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
-		gitUnix="$(git log -1 --pretty='%at')"
-		gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')"
-		gitCommit="$(git log -1 --pretty='%h')"
-		gitVersion="git${gitDate}.0.${gitCommit}"
-		# gitVersion is now something like 'git20150128.112847.0.17e840a'
-		debVersion="$debVersion~$gitVersion"
-
-		# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false
-		# true
-		# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false
-		# true
-		# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false
-		# true
-
-		# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a
-	fi
-
-	debSource="$(awk -F ': ' '$1 == "Source" { print $2; exit }' hack/make/.build-deb/control)"
-	debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)"
-	debDate="$(date --rfc-2822)"
-
-	# if go-md2man is available, pre-generate the man pages
-	make manpages
-
-	builderDir="contrib/builder/deb/${PACKAGE_ARCH}"
-	pkgs=( $(find "${builderDir}/"*/ -type d) )
-	if [ ! -z "$DOCKER_BUILD_PKGS" ]; then
-		pkgs=()
-		for p in $DOCKER_BUILD_PKGS; do
-			pkgs+=( "$builderDir/$p" )
-		done
-	fi
-	for dir in "${pkgs[@]}"; do
-		[ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; }
-		version="$(basename "$dir")"
-		suite="${version##*-}"
-
-		image="dockercore/builder-deb:$version"
-		if ! docker inspect "$image" &> /dev/null; then
-			(
-				# Add the APT_MIRROR args only if the consuming Dockerfile uses it
-				# Otherwise this will cause the build to fail
-				if [ "$(grep 'ARG APT_MIRROR=' $dir/Dockerfile)" ] && [ "$BUILD_APT_MIRROR" ]; then
-					DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS $BUILD_APT_MIRROR"
-				fi
-				set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir"
-			)
-		fi
-
-		mkdir -p "$DEST/$version"
-		cat > "$DEST/$version/Dockerfile.build" <<-EOF
-			FROM $image
-			WORKDIR /usr/src/docker
-			COPY . /usr/src/docker
-			ENV DOCKER_GITCOMMIT $GITCOMMIT
-			RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers \
-				&& ln -snf /usr/src/docker /go/src/github.com/docker/docker
-		EOF
-
-		cat >> "$DEST/$version/Dockerfile.build" <<-EOF
-			# Install runc, containerd, proxy and tini
-			RUN ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini
-		EOF
-		cat >> "$DEST/$version/Dockerfile.build" <<-EOF
-			RUN cp -aL hack/make/.build-deb debian
-			RUN { echo '$debSource (${debVersion}-0~${version}) $suite; urgency=low'; echo; echo '  * Version: $VERSION'; echo; echo " -- $debMaintainer  $debDate"; } > debian/changelog && cat >&2 debian/changelog
-			RUN dpkg-buildpackage -uc -us -I.git
-		EOF
-		tempImage="docker-temp/build-deb:$version"
-		( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . )
-		docker run --rm "$tempImage" bash -c 'cd .. && tar -c *_*' | tar -xvC "$DEST/$version"
-		docker rmi "$tempImage"
-	done
-
-	bundle .integration-daemon-stop
-) 2>&1 | tee -a "$DEST/test.log"
diff --git a/hack/make/build-integration-test-binary b/hack/make/build-integration-test-binary
index ad3e8c2..bbd5a22 100755
--- a/hack/make/build-integration-test-binary
+++ b/hack/make/build-integration-test-binary
@@ -2,7 +2,6 @@
 # required by `make build-integration-cli-on-swarm`
 set -e
 
-source "${MAKEDIR}/.go-autogen"
 source hack/make/.integration-test-helpers
 
 build_test_suite_binaries
diff --git a/hack/make/build-rpm b/hack/make/build-rpm
deleted file mode 100644
index 1e89a78..0000000
--- a/hack/make/build-rpm
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# subshell so that we can export PATH and TZ without breaking other things
-(
-	export TZ=UTC # make sure our "date" variables are UTC-based
-
-	source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
-	source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch"
-
-	# TODO consider using frozen images for the dockercore/builder-rpm tags
-
-	rpmName=docker-engine
-	rpmVersion="$VERSION"
-	rpmRelease=1
-
-	# rpmRelease versioning is as follows
-	# Docker 1.7.0:  version=1.7.0, release=1
-	# Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1
-	# Docker 1.7.0-cs1: version=1.7.0.cs1, release=1
-	# Docker 1.7.0-cs1-rc1: version=1.7.0.cs1, release=0.1.rc1
-	# Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH
-
-	# if we have a "-rc*" suffix, set appropriate release
-	if [[ "$rpmVersion" =~ .*-rc[0-9]+$ ]] ; then
-		rcVersion=${rpmVersion#*-rc}
-		rpmVersion=${rpmVersion%-rc*}
-		rpmRelease="0.${rcVersion}.rc${rcVersion}"
-	fi
-
-	DOCKER_GITCOMMIT=$(git rev-parse --short HEAD)
-	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
-		DOCKER_GITCOMMIT="$DOCKER_GITCOMMIT-unsupported"
-	fi
-
-	# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
-	if [[ "$rpmVersion" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
-		gitUnix="$(git log -1 --pretty='%at')"
-		gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')"
-		gitCommit="$(git log -1 --pretty='%h')"
-		gitVersion="${gitDate}.git${gitCommit}"
-		# gitVersion is now something like '20150128.112847.17e840a'
-		rpmVersion="${rpmVersion%-dev}"
-		rpmRelease="0.0.$gitVersion"
-	fi
-
-	# Replace any other dashes with periods
-	rpmVersion="${rpmVersion/-/.}"
-
-	rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)"
-	rpmDate="$(date +'%a %b %d %Y')"
-
-	# if go-md2man is available, pre-generate the man pages
-	make manpages
-
-	# Convert the CHANGELOG.md file into RPM changelog format
-	rm -f contrib/builder/rpm/${PACKAGE_ARCH}/changelog
-	VERSION_REGEX="^\W\W (.*) \((.*)\)$"
-	ENTRY_REGEX="^[-+*] (.*)$"
-	while read -r line || [[ -n "$line" ]]; do
-		if [ -z "$line" ]; then continue; fi
-		if [[ "$line" =~ $VERSION_REGEX ]]; then
-			echo >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog
-			echo "* `date -d ${BASH_REMATCH[2]} '+%a %b %d %Y'` ${rpmPackager} - ${BASH_REMATCH[1]}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog
-		fi
-		if [[ "$line" =~ $ENTRY_REGEX ]]; then
-			echo "- ${BASH_REMATCH[1]//\`}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog
-		fi
-	done < CHANGELOG.md
-
-	builderDir="contrib/builder/rpm/${PACKAGE_ARCH}"
-	pkgs=( $(find "${builderDir}/"*/ -type d) )
-	if [ ! -z "$DOCKER_BUILD_PKGS" ]; then
-		pkgs=()
-		for p in $DOCKER_BUILD_PKGS; do
-			pkgs+=( "$builderDir/$p" )
-		done
-	fi
-	for dir in "${pkgs[@]}"; do
-		[ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; }
-		version="$(basename "$dir")"
-		suite="${version##*-}"
-
-		image="dockercore/builder-rpm:$version"
-		if ! docker inspect "$image" &> /dev/null; then
-			( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" )
-		fi
-
-		mkdir -p "$DEST/$version"
-		cat > "$DEST/$version/Dockerfile.build" <<-EOF
-			FROM $image
-			COPY . /usr/src/${rpmName}
-			WORKDIR /usr/src/${rpmName}
-			RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers
-		EOF
-
-		cat >> "$DEST/$version/Dockerfile.build" <<-EOF
-			# Install runc, containerd, proxy and tini
-			RUN TMP_GOPATH="/go" ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini
-		EOF
-		if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
-			echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build"
-		fi
-		cat >> "$DEST/$version/Dockerfile.build" <<-EOF
-			RUN mkdir -p /root/rpmbuild/SOURCES \
-				&& echo '%_topdir /root/rpmbuild' > /root/.rpmmacros
-			WORKDIR /root/rpmbuild
-			RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS
-			WORKDIR /root/rpmbuild/SPECS
-			RUN tar --exclude .git -r -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar ${rpmName}
-			RUN tar --exclude .git -r -C /go/src/github.com/docker -f /root/rpmbuild/SOURCES/${rpmName}.tar containerd
-			RUN tar --exclude .git -r -C /go/src/github.com/docker/libnetwork/cmd -f /root/rpmbuild/SOURCES/${rpmName}.tar proxy
-			RUN tar --exclude .git -r -C /go/src/github.com/opencontainers -f /root/rpmbuild/SOURCES/${rpmName}.tar runc
-			RUN tar --exclude .git -r -C /go/ -f /root/rpmbuild/SOURCES/${rpmName}.tar tini
-			RUN gzip /root/rpmbuild/SOURCES/${rpmName}.tar
-			RUN { cat /usr/src/${rpmName}/contrib/builder/rpm/${PACKAGE_ARCH}/changelog; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec
-			RUN rpmbuild -ba \
-				--define '_gitcommit $DOCKER_GITCOMMIT' \
-				--define '_release $rpmRelease' \
-				--define '_version $rpmVersion' \
-				--define '_origversion $VERSION' \
-				--define '_experimental ${DOCKER_EXPERIMENTAL:-0}' \
-				${rpmName}.spec
-		EOF
-		# selinux policy referencing systemd things won't work on non-systemd versions
-		# of centos or rhel, which we don't support anyways
-		if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then
-			if [ -d "./contrib/selinux-$version" ]; then
-				selinuxDir="selinux-${version}"
-				cat >> "$DEST/$version/Dockerfile.build" <<-EOF
-					RUN tar -cz -C /usr/src/${rpmName}/contrib/${selinuxDir} -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux
-					RUN rpmbuild -ba \
-							--define '_gitcommit $DOCKER_GITCOMMIT' \
-							--define '_release $rpmRelease' \
-							--define '_version $rpmVersion' \
-							--define '_origversion $VERSION' \
-							${rpmName}-selinux.spec
-				EOF
-			fi
-		fi
-		tempImage="docker-temp/build-rpm:$version"
-		( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$tempImage" -f $DEST/$version/Dockerfile.build . )
-		docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version"
-		docker rmi "$tempImage"
-	done
-
-	source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop"
-) 2>&1 | tee -a $DEST/test.log
diff --git a/hack/make/clean-apt-repo b/hack/make/clean-apt-repo
deleted file mode 100755
index e823cb5..0000000
--- a/hack/make/clean-apt-repo
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# This script cleans the experimental pool for the apt repo.
-# This is useful when there are a lot of old experimental debs and you only want to keep the most recent.
-#
-
-: ${DOCKER_RELEASE_DIR:=$DEST}
-APTDIR=$DOCKER_RELEASE_DIR/apt/repo/pool/experimental
-: ${DOCKER_ARCHIVE_DIR:=$DEST/archive}
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-
-latest_versions=$(dpkg-scanpackages "$APTDIR" /dev/null 2>/dev/null | awk -F ': ' '$1 == "Filename" { print $2 }')
-
-# get the latest version
-latest_docker_engine_file=$(echo "$latest_versions" | grep docker-engine)
-latest_docker_engine_version=$(basename ${latest_docker_engine_file%~*})
-
-echo "latest docker-engine version: $latest_docker_engine_version"
-
-# remove all the files that are not that version in experimental
-pool_dir=$(dirname "$latest_docker_engine_file")
-old_pkgs=( $(ls "$pool_dir" | grep -v "^${latest_docker_engine_version}" | grep "${latest_docker_engine_version%%~git*}") )
-
-echo "${old_pkgs[@]}"
-
-mkdir -p "$DOCKER_ARCHIVE_DIR"
-for old_pkg in "${old_pkgs[@]}"; do
-	echo "moving ${pool_dir}/${old_pkg} to $DOCKER_ARCHIVE_DIR"
-	mv "${pool_dir}/${old_pkg}" "$DOCKER_ARCHIVE_DIR"
-done
-
-echo
-echo "$pool_dir now has contents:"
-ls "$pool_dir"
-
-# now regenerate release files for experimental
-export COMPONENT=experimental
-source "${DIR}/update-apt-repo"
-
-echo "You will now want to: "
-echo "   - re-sign the repo with hack/make/sign-repo"
-echo "   - re-generate index files with hack/make/generate-index-listing"
diff --git a/hack/make/clean-yum-repo b/hack/make/clean-yum-repo
deleted file mode 100755
index 012689a..0000000
--- a/hack/make/clean-yum-repo
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# This script cleans the experimental pool for the yum repo.
-# This is useful when there are a lot of old experimental rpms and you only want to keep the most recent.
-#
-
-: ${DOCKER_RELEASE_DIR:=$DEST}
-YUMDIR=$DOCKER_RELEASE_DIR/yum/repo/experimental
-
-suites=( $(find "$YUMDIR" -mindepth 1 -maxdepth 1 -type d) )
-
-for suite in "${suites[@]}"; do
-	echo "cleanup in: $suite"
-	( set -x; repomanage -k2 --old "$suite" | xargs rm -f )
-done
-
-echo "You will now want to: "
-echo "   - re-sign the repo with hack/make/sign-repo"
-echo "   - re-generate index files with hack/make/generate-index-listing"
diff --git a/hack/make/cover b/hack/make/cover
deleted file mode 100644
index 4a37995..0000000
--- a/hack/make/cover
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-bundle_cover() {
-	coverprofiles=( "$DEST/../"*"/coverprofiles/"* )
-	for p in "${coverprofiles[@]}"; do
-		echo
-		(
-			set -x
-			go tool cover -func="$p"
-		)
-	done
-}
-
-bundle_cover 2>&1 | tee "$DEST/report.log"
diff --git a/hack/make/generate-index-listing b/hack/make/generate-index-listing
deleted file mode 100755
index 9f12084..0000000
--- a/hack/make/generate-index-listing
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# This script generates index files for the directory structure
-# of the apt and yum repos
-
-: ${DOCKER_RELEASE_DIR:=$DEST}
-APTDIR=$DOCKER_RELEASE_DIR/apt
-YUMDIR=$DOCKER_RELEASE_DIR/yum
-
-if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then
-	echo >&2 'release-rpm or release-deb must be run before generate-index-listing'
-	exit 1
-fi
-
-create_index() {
-	local directory=$1
-	local original=$2
-	local cleaned=${directory#$original}
-
-	# the index file to create
-	local index_file="${directory}/index"
-
-	# cd into dir & touch the index file
-	cd $directory
-	touch $index_file
-
-	# print the html header
-	cat <<-EOF > "$index_file"
-	<!DOCTYPE html>
-	<html>
-	<head><title>Index of ${cleaned}/</title></head>
-	<body bgcolor="white">
-	<h1>Index of ${cleaned}/</h1><hr>
-	<pre><a href="../">../</a>
-	EOF
-
-	# start of content output
-	(
-	# change IFS locally within subshell so the for loop saves line correctly to L var
-	IFS=$'\n';
-
-	# pretty sweet, will mimic the normal apache output. skipping "index" and hidden files
-	for L in $(find -L . -mount -depth -maxdepth 1 -type f ! -name 'index' ! -name '.*' -prune -printf "<a href=\"%f\">%f|@_@%Td-%Tb-%TY %Tk:%TM  @%f@\n"|sort|column -t -s '|' | sed 's,\([\ ]\+\)@_@,</a>\1,g');
-	do
-		# file
-		F=$(sed -e 's,^.*@\([^@]\+\)@.*$,\1,g'<<<"$L");
-
-		# file with file size
-		F=$(du -bh $F | cut -f1);
-
-		# output with correct format
-		sed -e 's,\ @.*$, '"$F"',g'<<<"$L";
-	done;
-	) >> $index_file;
-
-	# now output a list of all directories in this dir (maxdepth 1) other than '.' outputting in a sorted manner exactly like apache
-	find -L . -mount -depth -maxdepth 1 -type d ! -name '.' -printf "<a href=\"%f\">%-43f@_@%Td-%Tb-%TY %Tk:%TM  -\n"|sort -d|sed 's,\([\ ]\+\)@_@,/</a>\1,g' >> $index_file
-
-	# print the footer html
-	echo "</pre><hr></body></html>" >> $index_file
-
-}
-
-get_dirs() {
-	local directory=$1
-
-	for d in `find ${directory} -type d`; do
-		create_index $d $directory
-	done
-}
-
-get_dirs $APTDIR
-get_dirs $YUMDIR
diff --git a/hack/make/install-binary b/hack/make/install-binary
old mode 100755
new mode 100644
index 57aa1a2..f6a4361
--- a/hack/make/install-binary
+++ b/hack/make/install-binary
@@ -3,6 +3,27 @@
 set -e
 rm -rf "$DEST"
 
+install_binary() {
+	local file="$1"
+	local target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/"
+	if [ "$(go env GOOS)" == "linux" ]; then
+		echo "Installing $(basename $file) to ${target}"
+		mkdir -p "$target"
+		cp -f -L "$file" "$target"
+	else
+		echo "Install is only supported on linux"
+		return 1
+	fi
+}
+
 (
-	source "${MAKEDIR}/install-binary-daemon"
+	DEST="$(dirname $DEST)/binary-daemon"
+	source "${MAKEDIR}/.binary-setup"
+	install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}"
+	install_binary "${DEST}/${DOCKER_RUNC_BINARY_NAME}"
+	install_binary "${DEST}/${DOCKER_CONTAINERD_BINARY_NAME}"
+	install_binary "${DEST}/${DOCKER_CONTAINERD_CTR_BINARY_NAME}"
+	install_binary "${DEST}/${DOCKER_CONTAINERD_SHIM_BINARY_NAME}"
+	install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}"
+	install_binary "${DEST}/${DOCKER_INIT_BINARY_NAME}"
 )
diff --git a/hack/make/install-binary-daemon b/hack/make/install-binary-daemon
deleted file mode 100644
index f6a4361..0000000
--- a/hack/make/install-binary-daemon
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-rm -rf "$DEST"
-
-install_binary() {
-	local file="$1"
-	local target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/"
-	if [ "$(go env GOOS)" == "linux" ]; then
-		echo "Installing $(basename $file) to ${target}"
-		mkdir -p "$target"
-		cp -f -L "$file" "$target"
-	else
-		echo "Install is only supported on linux"
-		return 1
-	fi
-}
-
-(
-	DEST="$(dirname $DEST)/binary-daemon"
-	source "${MAKEDIR}/.binary-setup"
-	install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}"
-	install_binary "${DEST}/${DOCKER_RUNC_BINARY_NAME}"
-	install_binary "${DEST}/${DOCKER_CONTAINERD_BINARY_NAME}"
-	install_binary "${DEST}/${DOCKER_CONTAINERD_CTR_BINARY_NAME}"
-	install_binary "${DEST}/${DOCKER_CONTAINERD_SHIM_BINARY_NAME}"
-	install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}"
-	install_binary "${DEST}/${DOCKER_INIT_BINARY_NAME}"
-)
diff --git a/hack/make/release-deb b/hack/make/release-deb
deleted file mode 100755
index acf4901..0000000
--- a/hack/make/release-deb
+++ /dev/null
@@ -1,163 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# This script creates the apt repos for the .deb files generated by hack/make/build-deb
-#
-# The following can then be used as apt sources:
-# 	deb http://apt.dockerproject.org/repo $distro-$release $version
-#
-# For example:
-#	deb http://apt.dockerproject.org/repo ubuntu-trusty main
-#	deb http://apt.dockerproject.org/repo ubuntu-trusty testing
-#	deb http://apt.dockerproject.org/repo debian-wheezy experimental
-#	deb http://apt.dockerproject.org/repo debian-jessie main
-#
-# ... and so on and so forth for the builds created by hack/make/build-deb
-
-: ${DOCKER_RELEASE_DIR:=$DEST}
-: ${GPG_KEYID:=releasedocker}
-APTDIR=$DOCKER_RELEASE_DIR/apt/repo
-
-# setup the apt repo (if it does not exist)
-mkdir -p "$APTDIR/conf" "$APTDIR/db" "$APTDIR/dists"
-
-# supported arches/sections
-arches=( amd64 i386 armhf ppc64le s390x )
-
-# Preserve existing components but don't add any non-existing ones
-for component in main testing experimental ; do
-	exists=$(find "$APTDIR/dists" -mindepth 2 -maxdepth 2 -type d -name "$component" -print -quit)
-	if [ -n "$exists" ] ; then
-		components+=( $component )
-	fi
-done
-
-# set the component for the version being released
-component="main"
-
-if [[ "$VERSION" == *-rc* ]]; then
-	component="testing"
-fi
-
-if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
-	component="experimental"
-fi
-
-# Make sure our component is in the list of components
-if [[ ! "${components[*]}" =~ $component ]] ; then
-	components+=( $component )
-fi
-
-# create apt-ftparchive file on every run. This is essential to avoid
-# using stale versions of the config file that could cause unnecessary
-# refreshing of bits for EOL-ed releases.
-cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf"
-Dir {
-	ArchiveDir "${APTDIR}";
-	CacheDir "${APTDIR}/db";
-};
-
-Default {
-	Packages::Compress ". gzip bzip2";
-	Sources::Compress ". gzip bzip2";
-	Contents::Compress ". gzip bzip2";
-};
-
-TreeDefault {
-	BinCacheDB "packages-\$(SECTION)-\$(ARCH).db";
-	Directory "pool/\$(SECTION)";
-	Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages";
-	SrcDirectory "pool/\$(SECTION)";
-	Sources "\$(DIST)/\$(SECTION)/source/Sources";
-	Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)";
-	FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist";
-};
-EOF
-
-for dir in bundles/$VERSION/build-deb/*/; do
-	version="$(basename "$dir")"
-	suite="${version//debootstrap-}"
-
-	cat <<-EOF
-	Tree "dists/${suite}" {
-		Sections "${components[*]}";
-		Architectures "${arches[*]}";
-	}
-
-	EOF
-done >> "$APTDIR/conf/apt-ftparchive.conf"
-
-cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf"
-APT::FTPArchive::Release::Origin "Docker";
-APT::FTPArchive::Release::Components "${components[*]}";
-APT::FTPArchive::Release::Label "Docker APT Repository";
-APT::FTPArchive::Release::Architectures "${arches[*]}";
-EOF
-
-# release the debs
-for dir in bundles/$VERSION/build-deb/*/; do
-	version="$(basename "$dir")"
-	codename="${version//debootstrap-}"
-
-	tempdir="$(mktemp -d /tmp/tmp-docker-release-deb.XXXXXXXX)"
-	DEBFILE=( "$dir/docker-engine"*.deb )
-
-	# add the deb for each component for the distro version into the
-	# pool (if it is not there already)
-	mkdir -p "$APTDIR/pool/$component/d/docker-engine/"
- 	for deb in ${DEBFILE[@]}; do
-		d=$(basename "$deb")
-		# We do not want to generate a new deb if it has already been
-		# copied into the APTDIR
-		if [ ! -f "$APTDIR/pool/$component/d/docker-engine/$d" ]; then
-			cp "$deb" "$tempdir/"
-			# if we have a $GPG_PASSPHRASE we may as well
-			# dpkg-sign before copying the deb into the pool
-			if [ ! -z "$GPG_PASSPHRASE" ]; then
-				dpkg-sig -g "--no-tty  --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE'" \
-					-k "$GPG_KEYID" --sign builder "$tempdir/$d"
-			fi
-			mv "$tempdir/$d" "$APTDIR/pool/$component/d/docker-engine/"
-		fi
-	done
-
-	rm -rf "$tempdir"
-
-	# build the right directory structure, needed for apt-ftparchive
-	for arch in "${arches[@]}"; do
-		for c in "${components[@]}"; do
-			mkdir -p "$APTDIR/dists/$codename/$c/binary-$arch"
-		done
-	done
-
-	# update the filelist for this codename/component
-	find "$APTDIR/pool/$component" \
-		-name *~${codename}*.deb -o \
-		-name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist"
-done
-
-# run the apt-ftparchive commands so we can have pinning
-apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf"
-
-for dir in bundles/$VERSION/build-deb/*/; do
-	version="$(basename "$dir")"
-	codename="${version//debootstrap-}"
-
-	apt-ftparchive \
-		-c "$APTDIR/conf/docker-engine-release.conf" \
-		-o "APT::FTPArchive::Release::Codename=$codename" \
-		-o "APT::FTPArchive::Release::Suite=$codename" \
-		release \
-		"$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release"
-
-	for arch in "${arches[@]}"; do
-		apt-ftparchive \
-			-c "$APTDIR/conf/docker-engine-release.conf" \
-			-o "APT::FTPArchive::Release::Codename=$codename" \
-			-o "APT::FTPArchive::Release::Suite=$codename" \
-			-o "APT::FTPArchive::Release::Components=$component" \
-			-o "APT::FTPArchive::Release::Architecture=$arch" \
-			release \
-			"$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release"
-	done
-done
diff --git a/hack/make/release-rpm b/hack/make/release-rpm
deleted file mode 100755
index 477d15b..0000000
--- a/hack/make/release-rpm
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# This script creates the yum repos for the .rpm files generated by hack/make/build-rpm
-#
-# The following can then be used as a yum repo:
-# 	http://yum.dockerproject.org/repo/$release/$distro/$distro-version
-#
-# For example:
-# 	http://yum.dockerproject.org/repo/main/fedora/23
-# 	http://yum.dockerproject.org/repo/testing/centos/7
-# 	http://yum.dockerproject.org/repo/experimental/fedora/23
-# 	http://yum.dockerproject.org/repo/main/centos/7
-#
-# ... and so on and so forth for the builds created by hack/make/build-rpm
-
-: ${DOCKER_RELEASE_DIR:=$DEST}
-YUMDIR=$DOCKER_RELEASE_DIR/yum/repo
-: ${GPG_KEYID:=releasedocker}
-
-# get the release
-release="main"
-
-if [[ "$VERSION" == *-rc* ]]; then
-	release="testing"
-fi
-
-if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
-	release="experimental"
-fi
-
-# Setup the yum repo
-for dir in bundles/$VERSION/build-rpm/*/; do
-	version="$(basename "$dir")"
-	suite="${version##*-}"
-	distro="${version%-*}"
-
-	REPO=$YUMDIR/$release/$distro
-
-	# if the directory does not exist, initialize the yum repo
-	if [[ ! -d $REPO/$suite/Packages ]]; then
-		mkdir -p "$REPO/$suite/Packages"
-
-		createrepo --pretty "$REPO/$suite"
-	fi
-
-	# path to rpms
-	RPMFILE=( "bundles/$VERSION/build-rpm/$version/RPMS/"*"/docker-engine"*.rpm "bundles/$VERSION/build-rpm/$version/SRPMS/docker-engine"*.rpm )
-
-	# if we have a $GPG_PASSPHRASE we may as well
-	# sign the rpms before adding to repo
-	if [ ! -z $GPG_PASSPHRASE ]; then
-		# export our key to rpm import
-		gpg --armor --export "$GPG_KEYID" > /tmp/gpg
-		rpm --import /tmp/gpg
-
-		# sign the rpms
-		echo "yes" | setsid rpm \
-			--define "_gpg_name $GPG_KEYID" \
-			--define "_signature gpg" \
-			--define "__gpg_check_password_cmd /bin/true" \
-			--define "__gpg_sign_cmd %{__gpg} gpg --batch --no-armor --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE' --no-secmem-warning -u '%{_gpg_name}' --sign --detach-sign --output %{__signature_filename} %{__plaintext_filename}" \
-			--resign "${RPMFILE[@]}"
-	fi
-
-	# copy the rpms to the packages folder
-	cp "${RPMFILE[@]}" "$REPO/$suite/Packages"
-
-	# update the repo
-	createrepo --pretty --update "$REPO/$suite"
-done
diff --git a/hack/make/sign-repos b/hack/make/sign-repos
deleted file mode 100755
index 61dbd7a..0000000
--- a/hack/make/sign-repos
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env bash
-
-# This script signs the deliverables from release-deb and release-rpm
-# with a designated GPG key.
-
-: ${DOCKER_RELEASE_DIR:=$DEST}
-: ${GPG_KEYID:=releasedocker}
-APTDIR=$DOCKER_RELEASE_DIR/apt/repo
-YUMDIR=$DOCKER_RELEASE_DIR/yum/repo
-
-if [ -z "$GPG_PASSPHRASE" ]; then
-	echo >&2 'you need to set GPG_PASSPHRASE in order to sign artifacts'
-	exit 1
-fi
-
-if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then
-	echo >&2 'release-rpm or release-deb must be run before sign-repos'
-	exit 1
-fi
-
-sign_packages(){
-	# sign apt repo metadata
-	if [ -d $APTDIR ]; then
-		# create file with public key
-		gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/apt/gpg"
-
-		# sign the repo metadata
-		for F in $(find $APTDIR -name Release); do
-			if test "$F" -nt "$F.gpg" ; then
-				gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \
-					--digest-algo "sha512" \
-					--armor --sign --detach-sign \
-					--batch --yes \
-					--output "$F.gpg" "$F"
-			fi
-			inRelease="$(dirname "$F")/InRelease"
-			if test "$F" -nt "$inRelease" ; then
-				gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \
-					--digest-algo "sha512" \
-					--clearsign \
-					--batch --yes \
-					--output "$inRelease" "$F"
-			fi
-		done
-	fi
-
-	# sign yum repo metadata
-	if [ -d $YUMDIR ]; then
-		# create file with public key
-		gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/yum/gpg"
-
-		# sign the repo metadata
-		for F in $(find $YUMDIR -name repomd.xml); do
-			if test "$F" -nt "$F.asc" ; then
-				gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \
-					--digest-algo "sha512" \
-					--armor --sign --detach-sign \
-					--batch --yes \
-					--output "$F.asc" "$F"
-			fi
-		done
-	fi
-}
-
-sign_packages
diff --git a/hack/make/test-integration b/hack/make/test-integration
index 0100ac9..c807cd4 100755
--- a/hack/make/test-integration
+++ b/hack/make/test-integration
@@ -1,7 +1,6 @@
 #!/usr/bin/env bash
 set -e -o pipefail
 
-source "${MAKEDIR}/.go-autogen"
 source hack/make/.integration-test-helpers
 
 (
diff --git a/hack/make/test-old-apt-repo b/hack/make/test-old-apt-repo
deleted file mode 100755
index e92b20e..0000000
--- a/hack/make/test-old-apt-repo
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-versions=( 1.3.3 1.4.1 1.5.0 1.6.2 )
-
-install() {
-	local version=$1
-	local tmpdir=$(mktemp -d /tmp/XXXXXXXXXX)
-	local dockerfile="${tmpdir}/Dockerfile"
-	cat <<-EOF > "$dockerfile"
-	FROM debian:jessie
-	ENV VERSION ${version}
-	RUN apt-get update && apt-get install -y \
-		apt-transport-https \
-		ca-certificates \
-		--no-install-recommends
-	RUN echo "deb https://get.docker.com/ubuntu docker main" > /etc/apt/sources.list.d/docker.list
-	RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \
-		--recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
-	RUN apt-get update && apt-get install -y \
-		lxc-docker-\${VERSION}
-	EOF
-
-	docker build --rm --force-rm --no-cache -t docker-old-repo:${version} -f $dockerfile $tmpdir
-}
-
-for v in "${versions[@]}"; do
-	install "$v"
-done
diff --git a/hack/make/test-unit b/hack/make/test-unit
deleted file mode 100644
index d985a6e..0000000
--- a/hack/make/test-unit
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-echo "DEPRECATED: use hack/test/unit instead of hack/make.sh test-unit" >&2
-
-$SCRIPTDIR/test/unit 2>&1 | tee -a "$DEST/test.log"
diff --git a/hack/make/tgz b/hack/make/tgz
deleted file mode 100644
index 1fd37b6..0000000
--- a/hack/make/tgz
+++ /dev/null
@@ -1,2 +0,0 @@
-#!/usr/bin/env bash
-echo "tgz is deprecated"
diff --git a/hack/make/ubuntu b/hack/make/ubuntu
deleted file mode 100644
index ad3f1d7..0000000
--- a/hack/make/ubuntu
+++ /dev/null
@@ -1,190 +0,0 @@
-#!/usr/bin/env bash
-
-PKGVERSION="${VERSION//-/'~'}"
-# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
-if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
-	GIT_UNIX="$(git log -1 --pretty='%at')"
-	GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')"
-	GIT_COMMIT="$(git log -1 --pretty='%h')"
-	GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}"
-	# GIT_VERSION is now something like 'git20150128.112847.0.17e840a'
-	PKGVERSION="$PKGVERSION~$GIT_VERSION"
-fi
-
-# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false
-# true
-# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false
-# true
-# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false
-# true
-
-# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a
-
-PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)"
-PACKAGE_URL="https://www.docker.com/"
-PACKAGE_MAINTAINER="support@docker.com"
-PACKAGE_DESCRIPTION="Linux container runtime
-Docker complements LXC with a high-level API which operates at the process
-level. It runs unix processes with strong guarantees of isolation and
-repeatability across servers.
-Docker is a great building block for automating distributed systems:
-large-scale web deployments, database clusters, continuous deployment systems,
-private PaaS, service-oriented architectures, etc."
-PACKAGE_LICENSE="Apache-2.0"
-
-# Build docker as an ubuntu package using FPM and REPREPRO (sue me).
-# bundle_binary must be called first.
-bundle_ubuntu() {
-	DIR="$ABS_DEST/build"
-
-	# Include our udev rules
-	mkdir -p "$DIR/etc/udev/rules.d"
-	cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/"
-
-	# Include our init scripts
-	mkdir -p "$DIR/etc/init"
-	cp contrib/init/upstart/docker.conf "$DIR/etc/init/"
-	mkdir -p "$DIR/etc/init.d"
-	cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/"
-	mkdir -p "$DIR/etc/default"
-	cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker"
-	mkdir -p "$DIR/lib/systemd/system"
-	cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/"
-
-	# Include contributed completions
-	mkdir -p "$DIR/etc/bash_completion.d"
-	cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/"
-	mkdir -p "$DIR/usr/share/zsh/vendor-completions"
-	cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/"
-	mkdir -p "$DIR/etc/fish/completions"
-	cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/"
-
-	# Include man pages
-	make manpages
-	manRoot="$DIR/usr/share/man"
-	mkdir -p "$manRoot"
-	for manDir in man/man?; do
-		manBase="$(basename "$manDir")" # "man1"
-		for manFile in "$manDir"/*; do
-			manName="$(basename "$manFile")" # "docker-build.1"
-			mkdir -p "$manRoot/$manBase"
-			gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz"
-		done
-	done
-
-	# Copy the binary
-	# This will fail if the binary bundle hasn't been built
-	mkdir -p "$DIR/usr/bin"
-	cp "$DEST/../binary/docker-$VERSION" "$DIR/usr/bin/docker"
-
-	# Generate postinst/prerm/postrm scripts
-	cat > "$DEST/postinst" <<'EOF'
-#!/bin/sh
-set -e
-set -u
-
-if [ "$1" = 'configure' ] && [ -z "$2" ]; then
-	if ! getent group docker > /dev/null; then
-		groupadd --system docker
-	fi
-fi
-
-if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then
-	# we only need to do this if upstart isn't in charge
-	update-rc.d docker defaults > /dev/null || true
-fi
-if [ -n "$2" ]; then
-	_dh_action=restart
-else
-	_dh_action=start
-fi
-service docker $_dh_action 2>/dev/null || true
-
-#DEBHELPER#
-EOF
-	cat > "$DEST/prerm" <<'EOF'
-#!/bin/sh
-set -e
-set -u
-
-service docker stop 2>/dev/null || true
-
-#DEBHELPER#
-EOF
-	cat > "$DEST/postrm" <<'EOF'
-#!/bin/sh
-set -e
-set -u
-
-if [ "$1" = "purge" ] ; then
-	update-rc.d docker remove > /dev/null || true
-fi
-
-# In case this system is running systemd, we make systemd reload the unit files
-# to pick up changes.
-if [ -d /run/systemd/system ] ; then
-	systemctl --system daemon-reload > /dev/null || true
-fi
-
-#DEBHELPER#
-EOF
-	# TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way
-	chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm"
-
-	(
-		# switch directories so we create *.deb in the right folder
-		cd "$DEST"
-
-		# create lxc-docker-VERSION package
-		fpm -s dir -C "$DIR" \
-			--name "lxc-docker-$VERSION" --version "$PKGVERSION" \
-			--after-install "$ABS_DEST/postinst" \
-			--before-remove "$ABS_DEST/prerm" \
-			--after-remove "$ABS_DEST/postrm" \
-			--architecture "$PACKAGE_ARCHITECTURE" \
-			--prefix / \
-			--depends iptables \
-			--deb-recommends aufs-tools \
-			--deb-recommends ca-certificates \
-			--deb-recommends git \
-			--deb-recommends xz-utils \
-			--deb-recommends 'cgroupfs-mount | cgroup-lite' \
-			--deb-suggests apparmor \
-			--description "$PACKAGE_DESCRIPTION" \
-			--maintainer "$PACKAGE_MAINTAINER" \
-			--conflicts docker \
-			--conflicts docker.io \
-			--conflicts lxc-docker-virtual-package \
-			--provides lxc-docker \
-			--provides lxc-docker-virtual-package \
-			--replaces lxc-docker \
-			--replaces lxc-docker-virtual-package \
-			--url "$PACKAGE_URL" \
-			--license "$PACKAGE_LICENSE" \
-			--config-files /etc/udev/rules.d/80-docker.rules \
-			--config-files /etc/init/docker.conf \
-			--config-files /etc/init.d/docker \
-			--config-files /etc/default/docker \
-			--deb-compression gz \
-			-t deb .
-		# TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available
-
-		# create empty lxc-docker wrapper package
-		fpm -s empty \
-			--name lxc-docker --version "$PKGVERSION" \
-			--architecture "$PACKAGE_ARCHITECTURE" \
-			--depends lxc-docker-$VERSION \
-			--description "$PACKAGE_DESCRIPTION" \
-			--maintainer "$PACKAGE_MAINTAINER" \
-			--url "$PACKAGE_URL" \
-			--license "$PACKAGE_LICENSE" \
-			--deb-compression gz \
-			-t deb
-	)
-
-	# clean up after ourselves so we have a clean output directory
-	rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm"
-	rm -r "$DIR"
-}
-
-bundle_ubuntu
diff --git a/hack/make/update-apt-repo b/hack/make/update-apt-repo
deleted file mode 100755
index 3a80c94..0000000
--- a/hack/make/update-apt-repo
+++ /dev/null
@@ -1,70 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# This script updates the apt repo in $DOCKER_RELEASE_DIR/apt/repo.
-# This script is a "fix all" for any sort of problems that might have occurred with
-# the Release or Package files in the repo.
-# It should only be used in the rare case of extreme emergencies to regenerate
-# Release and Package files for the apt repo.
-#
-# NOTE: Always be sure to re-sign the repo with hack/make/sign-repos after running
-# this script.
-
-: ${DOCKER_RELEASE_DIR:=$DEST}
-APTDIR=$DOCKER_RELEASE_DIR/apt/repo
-
-# supported arches/sections
-arches=( amd64 i386 )
-
-# Preserve existing components but don't add any non-existing ones
-for component in main testing experimental ; do
-	if ls "$APTDIR/dists/*/$component" >/dev/null 2>&1 ; then
-		components+=( $component )
-	fi
-done
-
-dists=( $(find "${APTDIR}/dists" -maxdepth 1 -mindepth 1 -type d) )
-
-# override component if it is set
-if [ "$COMPONENT" ]; then
-	components=( $COMPONENT )
-fi
-
-# release the debs
-for version in "${dists[@]}"; do
-	for component in "${components[@]}"; do
-		codename="${version//debootstrap-}"
-
-		# update the filelist for this codename/component
-		find "$APTDIR/pool/$component" \
-			-name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist"
-	done
-done
-
-# run the apt-ftparchive commands so we can have pinning
-apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf"
-
-for dist in "${dists[@]}"; do
-	version=$(basename "$dist")
-	for component in "${components[@]}"; do
-		codename="${version//debootstrap-}"
-
-		apt-ftparchive \
-			-o "APT::FTPArchive::Release::Codename=$codename" \
-			-o "APT::FTPArchive::Release::Suite=$codename" \
-			-c "$APTDIR/conf/docker-engine-release.conf" \
-			release \
-			"$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release"
-
-		for arch in "${arches[@]}"; do
-			apt-ftparchive \
-				-o "APT::FTPArchive::Release::Codename=$codename" \
-				-o "APT::FTPArchive::Release::Suite=$codename" \
-				-o "APT::FTPArchive::Release::Component=$component" \
-				-o "APT::FTPArchive::Release::Architecture=$arch" \
-				-c "$APTDIR/conf/docker-engine-release.conf" \
-				release \
-				"$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release"
-		done
-	done
-done
diff --git a/hack/make/win b/hack/make/win
deleted file mode 100644
index bc6d510..0000000
--- a/hack/make/win
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# explicit list of os/arch combos that support being a daemon
-declare -A daemonSupporting
-daemonSupporting=(
-	[linux/amd64]=1
-	[windows/amd64]=1
-)
-platform="windows/amd64"
-export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION
-mkdir -p "$DEST"
-ABS_DEST="$(cd "$DEST" && pwd -P)"
-export GOOS=${platform%/*}
-export GOARCH=${platform##*/}
-if [ -z "${daemonSupporting[$platform]}" ]; then
-	export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms
-	export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported
-fi
-source "${MAKEDIR}/binary"
diff --git a/hack/release.sh b/hack/release.sh
deleted file mode 100755
index 04f15be..0000000
--- a/hack/release.sh
+++ /dev/null
@@ -1,317 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-# This script looks for bundles built by make.sh, and releases them on a
-# public S3 bucket.
-#
-# Bundles should be available for the VERSION string passed as argument.
-#
-# The correct way to call this script is inside a container built by the
-# official Dockerfile at the root of the Docker source code. The Dockerfile,
-# make.sh and release.sh should all be from the same source code revision.
-
-set -o pipefail
-
-# Print a usage message and exit.
-usage() {
-	cat >&2 <<'EOF'
-To run, I need:
-- to be in a container generated by the Dockerfile at the top of the Docker
-  repository;
-- to be provided with the location of an S3 bucket and path, in
-  environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
-- to be provided with AWS credentials for this S3 bucket, in environment
-  variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY;
-- a generous amount of good will and nice manners.
-The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"
-
-docker run -e AWS_S3_BUCKET=test.docker.com \
-           -e AWS_ACCESS_KEY_ID     \
-           -e AWS_SECRET_ACCESS_KEY \
-           -e AWS_DEFAULT_REGION    \
-           -it --privileged         \
-           docker ./hack/release.sh
-EOF
-	exit 1
-}
-
-[ "$AWS_S3_BUCKET" ] || usage
-[ "$AWS_ACCESS_KEY_ID" ] || usage
-[ "$AWS_SECRET_ACCESS_KEY" ] || usage
-[ -d /go/src/github.com/docker/docker ] || usage
-cd /go/src/github.com/docker/docker
-[ -x hack/make.sh ] || usage
-
-export AWS_DEFAULT_REGION
-: ${AWS_DEFAULT_REGION:=us-west-1}
-
-AWS_CLI=${AWS_CLI:-'aws'}
-
-RELEASE_BUNDLES=(
-	binary
-	cross
-	tgz
-)
-
-if [ "$1" != '--release-regardless-of-test-failure' ]; then
-	RELEASE_BUNDLES=(
-		test-unit
-		"${RELEASE_BUNDLES[@]}"
-		test-integration
-	)
-fi
-
-VERSION=$(< VERSION)
-BUCKET=$AWS_S3_BUCKET
-BUCKET_PATH=$BUCKET
-[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH
-
-if command -v git &> /dev/null && git rev-parse &> /dev/null; then
-	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
-		echo "You cannot run the release script on a repo with uncommitted changes"
-		usage
-	fi
-fi
-
-# These are the 2 keys we've used to sign the deb's
-#   release (get.docker.com)
-#	GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
-#   test    (test.docker.com)
-#	GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"
-
-setup_s3() {
-	echo "Setting up S3"
-	# Try creating the bucket. Ignore errors (it might already exist).
-	$AWS_CLI s3 mb "s3://$BUCKET" 2>/dev/null || true
-	# Check access to the bucket.
-	$AWS_CLI s3 ls "s3://$BUCKET" >/dev/null
-	# Make the bucket accessible through website endpoints.
-	$AWS_CLI s3 website --index-document index --error-document error "s3://$BUCKET"
-}
-
-# write_to_s3 uploads the contents of standard input to the specified S3 url.
-write_to_s3() {
-	DEST=$1
-	F=`mktemp`
-	cat > "$F"
-	$AWS_CLI s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST"
-	rm -f "$F"
-}
-
-s3_url() {
-	case "$BUCKET" in
-		get.docker.com|test.docker.com|experimental.docker.com)
-			echo "https://$BUCKET_PATH"
-			;;
-		*)
-			BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com"
-			if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then
-				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
-			else
-				echo "$BASE_URL"
-			fi
-			;;
-	esac
-}
-
-build_all() {
-	echo "Building release"
-	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
-		echo >&2
-		echo >&2 'The build or tests appear to have failed.'
-		echo >&2
-		echo >&2 'You, as the release  maintainer, now have a couple options:'
-		echo >&2 '- delay release and fix issues'
-		echo >&2 '- delay release and fix issues'
-		echo >&2 '- did we mention how important this is?  issues need fixing :)'
-		echo >&2
-		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
-		echo >&2 ' really knows all the hairy problems at hand with the current release'
-		echo >&2 ' issues) may bypass this checking by running this script again with the'
-		echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
-		echo >&2 ' running the test suite, and will only build the binaries and packages.  Please'
-		echo >&2 ' avoid using this if at all possible.'
-		echo >&2
-		echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
-		echo >&2 ' should be used.  If there are release issues, we should always err on the'
-		echo >&2 ' side of caution.'
-		echo >&2
-		exit 1
-	fi
-}
-
-upload_release_build() {
-	src="$1"
-	dst="$2"
-	latest="$3"
-
-	echo
-	echo "Uploading $src"
-	echo "  to $dst"
-	echo
-	$AWS_CLI s3 cp --follow-symlinks --acl public-read "$src" "$dst"
-	if [ "$latest" ]; then
-		echo
-		echo "Copying to $latest"
-		echo
-		$AWS_CLI s3 cp --acl public-read "$dst" "$latest"
-	fi
-
-	# get hash files too (see hash_files() in hack/make.sh)
-	for hashAlgo in md5 sha256; do
-		if [ -e "$src.$hashAlgo" ]; then
-			echo
-			echo "Uploading $src.$hashAlgo"
-			echo "  to $dst.$hashAlgo"
-			echo
-			$AWS_CLI s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo"
-			if [ "$latest" ]; then
-				echo
-				echo "Copying to $latest.$hashAlgo"
-				echo
-				$AWS_CLI s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo"
-			fi
-		fi
-	done
-}
-
-release_build() {
-	echo "Releasing binaries"
-	GOOS=$1
-	GOARCH=$2
-
-	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
-	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
-	binary=docker-$VERSION
-	zipExt=".tgz"
-	binaryExt=""
-	tgz=$binary$zipExt
-
-	latestBase=
-	if [ -z "$NOLATEST" ]; then
-		latestBase=docker-latest
-	fi
-
-	# we need to map our GOOS and GOARCH to uname values
-	# see https://en.wikipedia.org/wiki/Uname
-	# ie, GOOS=linux -> "uname -s"=Linux
-
-	s3Os=$GOOS
-	case "$s3Os" in
-		darwin)
-			s3Os=Darwin
-			;;
-		freebsd)
-			s3Os=FreeBSD
-			;;
-		linux)
-			s3Os=Linux
-			;;
-		solaris)
-			echo skipping solaris release
-			return 0
-			;;
-		windows)
-			# this is windows use the .zip and .exe extensions for the files.
-			s3Os=Windows
-			zipExt=".zip"
-			binaryExt=".exe"
-			tgz=$binary$zipExt
-			binary+=$binaryExt
-			;;
-		*)
-			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
-			exit 1
-			;;
-	esac
-
-	s3Arch=$GOARCH
-	case "$s3Arch" in
-		amd64)
-			s3Arch=x86_64
-			;;
-		386)
-			s3Arch=i386
-			;;
-		arm)
-			s3Arch=armel
-			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
-			;;
-		*)
-			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
-			exit 1
-			;;
-	esac
-
-	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
-	# latest=
-	latestTgz=
-	if [ "$latestBase" ]; then
-		# commented out since we aren't uploading binaries right now.
-		# latest="$s3Dir/$latestBase$binaryExt"
-		# we don't include the $binaryExt because we don't want docker.exe.zip
-		latestTgz="$s3Dir/$latestBase$zipExt"
-	fi
-
-	if [ ! -f "$tgzDir/$tgz" ]; then
-		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
-		exit 1
-	fi
-	# disable binary uploads for now. Only providing tgz downloads
-	# upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
-	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
-}
-
-# Upload binaries and tgz files to S3
-release_binaries() {
-	[ "$(find bundles/$VERSION -path "bundles/$VERSION/cross/*/*/docker-$VERSION")" != "" ] || {
-		echo >&2 './hack/make.sh must be run before release_binaries'
-		exit 1
-	}
-
-	for d in bundles/$VERSION/cross/*/*; do
-		GOARCH="$(basename "$d")"
-		GOOS="$(basename "$(dirname "$d")")"
-		release_build "$GOOS" "$GOARCH"
-	done
-
-	# TODO create redirect from builds/*/i686 to builds/*/i386
-
-	cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
-# To install, run the following commands as root:
-curl -fsSLO $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz && tar --strip-components=1 -xvzf docker-$VERSION.tgz -C /usr/local/bin
-
-# Then start docker in daemon mode:
-/usr/local/bin/dockerd
-EOF
-
-	# Add redirect at /builds/info for URL-backwards-compatibility
-	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
-	$AWS_CLI s3 cp --acl public-read --website-redirect '/builds/' --content-type='text/plain' /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"
-
-	if [ -z "$NOLATEST" ]; then
-		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
-		echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
-	fi
-}
-
-main() {
-	[ "$SKIP_RELEASE_BUILD" = '1' ] || build_all
-	setup_s3
-	release_binaries
-}
-
-main
-
-echo
-echo
-echo "Release complete; see $(s3_url)"
-echo "Use the following text to announce the release:"
-echo
-echo "We have just pushed $VERSION to $(s3_url). You can download it with the following:"
-echo
-echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
-echo "Darwin/OSX 64bit client tgz: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION.tgz"
-echo "Windows 64bit zip: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.zip"
-echo "Windows 32bit client zip: $(s3_url)/builds/Windows/i386/docker-$VERSION.zip"
-echo
diff --git a/hack/test/e2e-run.sh b/hack/test/e2e-run.sh
new file mode 100755
index 0000000..448f120
--- /dev/null
+++ b/hack/test/e2e-run.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+set -e
+
+TESTFLAGS=${TESTFLAGS:-""}
+# Currently only DockerSuite and DockerNetworkSuite have been adapted for E2E testing
+TESTFLAGS_LEGACY=${TESTFLAGS_LEGACY:-""}
+TIMEOUT=${TIMEOUT:-60m}
+
+SCRIPTDIR="$(dirname ${BASH_SOURCE[0]})"
+
+export DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:-amd64}
+
+run_test_integration() {
+  run_test_integration_suites
+  run_test_integration_legacy_suites
+}
+
+run_test_integration_suites() {
+  local flags="-test.timeout=${TIMEOUT} $TESTFLAGS"
+  for dir in /tests/integration/*; do
+    if ! (
+      cd $dir
+      echo "Running $PWD"
+      ./test.main $flags
+    ); then exit 1; fi
+  done
+}
+
+run_test_integration_legacy_suites() {
+  (
+    flags="-check.timeout=${TIMEOUT} -test.timeout=360m $TESTFLAGS_LEGACY"
+    cd /tests/integration-cli
+    echo "Running $PWD"
+    ./test.main $flags
+  )
+}
+
+bash $SCRIPTDIR/ensure-emptyfs.sh
+
+echo "Run integration tests"
+run_test_integration
diff --git a/hack/test/unit b/hack/test/unit
index 2b07089..76e5d46 100755
--- a/hack/test/unit
+++ b/hack/test/unit
@@ -17,9 +17,6 @@
 TESTDIRS="${TESTDIRS:-"./..."}"
 
 exclude_paths="/vendor/|/integration"
-if [ "$(go env GOHOSTOS)" = 'solaris' ]; then
-	exclude_paths="$exclude_paths|/daemon/graphdriver"
-fi
 pkg_list=$(go list $TESTDIRS | grep -vE "($exclude_paths)")
 
 go test -cover "${BUILDFLAGS[@]}" $TESTFLAGS $pkg_list
diff --git a/hack/validate/gometalinter b/hack/validate/gometalinter
index 9830659..ae411e8 100755
--- a/hack/validate/gometalinter
+++ b/hack/validate/gometalinter
@@ -3,4 +3,9 @@
 
 SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 
-gometalinter --config $SCRIPTDIR/gometalinter.json ./...
+# CI platforms differ, so per-platform GOMETALINTER_OPTS can be set
+# from a platform-specific Dockerfile, otherwise let's just set
+# (somewhat pessimistic) default of 10 minutes.
+gometalinter \
	${GOMETALINTER_OPTS:---deadline 10m} \
+	--config $SCRIPTDIR/gometalinter.json ./...
diff --git a/hack/validate/gometalinter.json b/hack/validate/gometalinter.json
index 2e0a6c2..8e8da10 100644
--- a/hack/validate/gometalinter.json
+++ b/hack/validate/gometalinter.json
@@ -1,6 +1,6 @@
 {
   "Vendor": true,
-  "Deadline": "2m",
+  "EnableGC": true,
   "Sort": ["linter", "severity", "path"],
   "Exclude": [
     ".*\\.pb\\.go",
@@ -8,15 +8,15 @@
     "api/types/container/container_.*",
     "integration-cli/"
   ],
-  "Skip": [
-    "integration-cli/"
-  ],
+  "Skip": ["integration-cli/"],
 
   "Enable": [
     "deadcode",
     "gofmt",
     "goimports",
     "golint",
+    "gosimple",
+    "ineffassign",
     "interfacer",
     "unconvert",
     "vet"
diff --git a/image/fs_test.go b/image/fs_test.go
index 5464ab5..2672524 100644
--- a/image/fs_test.go
+++ b/image/fs_test.go
@@ -1,7 +1,6 @@
 package image
 
 import (
-	"bytes"
 	"crypto/rand"
 	"crypto/sha256"
 	"encoding/hex"
@@ -12,7 +11,7 @@
 	"testing"
 
 	"github.com/docker/docker/internal/testutil"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -112,9 +111,7 @@
 		actual, err := store.GetMetadata(tc.id, tc.key)
 		assert.NoError(t, err)
 
-		if bytes.Compare(actual, tc.value) != 0 {
-			t.Fatalf("Metadata expected %q, got %q", tc.value, actual)
-		}
+		assert.Equal(t, tc.value, actual)
 	}
 
 	_, err = store.GetMetadata(id2, "tkey2")
@@ -183,9 +180,7 @@
 	for _, tc := range tcases {
 		data, err := store.Get(tc.expected)
 		assert.NoError(t, err)
-		if bytes.Compare(data, tc.input) != 0 {
-			t.Fatalf("expected data %q, got %q", tc.input, data)
-		}
+		assert.Equal(t, tc.input, data)
 	}
 }
 
diff --git a/image/image.go b/image/image.go
index c63aec5..69e4abd 100644
--- a/image/image.go
+++ b/image/image.go
@@ -96,8 +96,8 @@
 	return img.Config
 }
 
-// Platform returns the image's operating system. If not populated, defaults to the host runtime OS.
-func (img *Image) Platform() string {
+// OperatingSystem returns the image's operating system. If not populated, defaults to the host runtime OS.
+func (img *Image) OperatingSystem() string {
 	os := img.OS
 	if os == "" {
 		os = runtime.GOOS
diff --git a/image/image_test.go b/image/image_test.go
index e04587e..ba42db7 100644
--- a/image/image_test.go
+++ b/image/image_test.go
@@ -2,6 +2,7 @@
 
 import (
 	"encoding/json"
+	"runtime"
 	"sort"
 	"strings"
 	"testing"
@@ -54,6 +55,39 @@
 	}
 }
 
+func TestImage(t *testing.T) {
+	cid := "50a16564e727"
+	config := &container.Config{
+		Hostname:   "hostname",
+		Domainname: "domain",
+		User:       "root",
+	}
+	os := runtime.GOOS
+
+	img := &Image{
+		V1Image: V1Image{
+			Config: config,
+		},
+		computedID: ID(cid),
+	}
+
+	assert.Equal(t, cid, img.ImageID())
+	assert.Equal(t, cid, img.ID().String())
+	assert.Equal(t, os, img.OperatingSystem())
+	assert.Equal(t, config, img.RunConfig())
+}
+
+func TestImageOSNotEmpty(t *testing.T) {
+	os := "os"
+	img := &Image{
+		V1Image: V1Image{
+			OS: os,
+		},
+		OSVersion: "osversion",
+	}
+	assert.Equal(t, os, img.OperatingSystem())
+}
+
 func TestNewChildImageFromImageWithRootFS(t *testing.T) {
 	rootFS := NewRootFS()
 	rootFS.Append(layer.DiffID("ba5e"))
diff --git a/image/store.go b/image/store.go
index 17769f4..0484285 100644
--- a/image/store.go
+++ b/image/store.go
@@ -47,17 +47,17 @@
 	images    map[ID]*imageMeta
 	fs        StoreBackend
 	digestSet *digestset.Set
-	platform  string
+	os        string
 }
 
 // NewImageStore returns new store object for given layer store
-func NewImageStore(fs StoreBackend, platform string, ls LayerGetReleaser) (Store, error) {
+func NewImageStore(fs StoreBackend, os string, ls LayerGetReleaser) (Store, error) {
 	is := &store{
 		ls:        ls,
 		images:    make(map[ID]*imageMeta),
 		fs:        fs,
 		digestSet: digestset.NewSet(),
-		platform:  platform,
+		os:        os,
 	}
 
 	// load all current images and retain layers
@@ -118,11 +118,11 @@
 		return "", err
 	}
 
-	// TODO @jhowardmsft - LCOW Support. This will need revisiting.
+	// TODO @jhowardmsft - LCOW Support. This will need revisiting when coalescing the image stores.
 	// Integrity check - ensure we are creating something for the correct platform
 	if system.LCOWSupported() {
-		if strings.ToLower(img.Platform()) != strings.ToLower(is.platform) {
-			return "", fmt.Errorf("cannot create entry for platform %q in image store for platform %q", img.Platform(), is.platform)
+		if strings.ToLower(img.OperatingSystem()) != strings.ToLower(is.os) {
+			return "", fmt.Errorf("cannot create entry for operating system %q in image store for operating system %q", img.OperatingSystem(), is.os)
 		}
 	}
 
diff --git a/image/store_test.go b/image/store_test.go
index e6c1746..23a60a9 100644
--- a/image/store_test.go
+++ b/image/store_test.go
@@ -41,10 +41,10 @@
 	assert.Equal(t, "abc", img1.Comment)
 	assert.Equal(t, "def", img2.Comment)
 
-	p, err := is.GetParent(ID(id1))
+	_, err = is.GetParent(ID(id1))
 	testutil.ErrorContains(t, err, "failed to read metadata")
 
-	p, err = is.GetParent(ID(id2))
+	p, err := is.GetParent(ID(id2))
 	assert.NoError(t, err)
 	assert.Equal(t, ID(id1), p)
 
diff --git a/image/tarexport/load.go b/image/tarexport/load.go
index 480400c..81beaf4 100644
--- a/image/tarexport/load.go
+++ b/image/tarexport/load.go
@@ -82,8 +82,7 @@
 		if err := checkCompatibleOS(img.OS); err != nil {
 			return err
 		}
-		var rootFS image.RootFS
-		rootFS = *img.RootFS
+		rootFS := *img.RootFS
 		rootFS.DiffIDs = nil
 
 		if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual {
@@ -91,13 +90,13 @@
 		}
 
 		// On Windows, validate the platform, defaulting to windows if not present.
-		platform := layer.Platform(img.OS)
+		os := layer.OS(img.OS)
 		if runtime.GOOS == "windows" {
-			if platform == "" {
-				platform = "windows"
+			if os == "" {
+				os = "windows"
 			}
-			if (platform != "windows") && (platform != "linux") {
-				return fmt.Errorf("configuration for this image has an unsupported platform: %s", platform)
+			if (os != "windows") && (os != "linux") {
+				return fmt.Errorf("configuration for this image has an unsupported operating system: %s", os)
 			}
 		}
 
@@ -110,7 +109,7 @@
 			r.Append(diffID)
 			newLayer, err := l.ls.Get(r.ChainID())
 			if err != nil {
-				newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), platform, m.LayerSources[diffID], progressOutput)
+				newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), os, m.LayerSources[diffID], progressOutput)
 				if err != nil {
 					return err
 				}
@@ -177,7 +176,7 @@
 	return l.is.SetParent(id, parentID)
 }
 
-func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, platform layer.Platform, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) {
+func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, os layer.OS, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) {
 	// We use system.OpenSequential to use sequential file access on Windows, avoiding
 	// depleting the standby list. On Linux, this equates to a regular os.Open.
 	rawTar, err := system.OpenSequential(filename)
@@ -207,9 +206,9 @@
 	defer inflatedLayerData.Close()
 
 	if ds, ok := l.ls.(layer.DescribableStore); ok {
-		return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), platform, foreignSrc)
+		return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), os, foreignSrc)
 	}
-	return l.ls.Register(inflatedLayerData, rootFS.ChainID(), platform)
+	return l.ls.Register(inflatedLayerData, rootFS.ChainID(), os)
 }
 
 func (l *tarexporter) setLoadedTag(ref reference.Named, imgID digest.Digest, outStream io.Writer) error {
diff --git a/integration-cli/check_test.go b/integration-cli/check_test.go
index 87517e7..49504c6 100644
--- a/integration-cli/check_test.go
+++ b/integration-cli/check_test.go
@@ -60,6 +60,12 @@
 
 func TestMain(m *testing.M) {
 	dockerBinary = testEnv.DockerBinary()
+	err := ienv.EnsureFrozenImagesLinux(&testEnv.Execution)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
 	testEnv.Print()
 	os.Exit(m.Run())
 }
@@ -67,7 +73,7 @@
 func Test(t *testing.T) {
 	cli.SetTestEnvironment(testEnv)
 	fakestorage.SetTestEnvironment(&testEnv.Execution)
-	ienv.ProtectImages(t, &testEnv.Execution)
+	ienv.ProtectAll(t, &testEnv.Execution)
 	check.TestingT(t)
 }
 
@@ -79,6 +85,9 @@
 }
 
 func (s *DockerSuite) OnTimeout(c *check.C) {
+	if !testEnv.IsLocalDaemon() {
+		return
+	}
 	path := filepath.Join(os.Getenv("DEST"), "docker.pid")
 	b, err := ioutil.ReadFile(path)
 	if err != nil {
@@ -91,7 +100,7 @@
 	}
 
 	daemonPid := int(rawPid)
-	if daemonPid > 0 && testEnv.IsLocalDaemon() {
+	if daemonPid > 0 {
 		daemon.SignalDaemonDump(daemonPid)
 	}
 }
@@ -117,7 +126,7 @@
 }
 
 func (s *DockerRegistrySuite) SetUpTest(c *check.C) {
-	testRequires(c, DaemonIsLinux, registry.Hosting)
+	testRequires(c, DaemonIsLinux, registry.Hosting, SameHostDaemon)
 	s.reg = setupRegistry(c, false, "", "")
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
 		Experimental: testEnv.ExperimentalDaemon(),
@@ -151,7 +160,7 @@
 }
 
 func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) {
-	testRequires(c, DaemonIsLinux, registry.Hosting, NotArm64)
+	testRequires(c, DaemonIsLinux, registry.Hosting, NotArm64, SameHostDaemon)
 	s.reg = setupRegistry(c, true, "", "")
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
 		Experimental: testEnv.ExperimentalDaemon(),
@@ -185,7 +194,7 @@
 }
 
 func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) {
-	testRequires(c, DaemonIsLinux, registry.Hosting)
+	testRequires(c, DaemonIsLinux, registry.Hosting, SameHostDaemon)
 	s.reg = setupRegistry(c, false, "htpasswd", "")
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
 		Experimental: testEnv.ExperimentalDaemon(),
@@ -221,7 +230,7 @@
 }
 
 func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) {
-	testRequires(c, DaemonIsLinux, registry.Hosting)
+	testRequires(c, DaemonIsLinux, registry.Hosting, SameHostDaemon)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
 		Experimental: testEnv.ExperimentalDaemon(),
 	})
@@ -316,7 +325,7 @@
 }
 
 func (s *DockerSwarmSuite) SetUpTest(c *check.C) {
-	testRequires(c, DaemonIsLinux)
+	testRequires(c, DaemonIsLinux, SameHostDaemon)
 }
 
 func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *daemon.Swarm {
@@ -330,7 +339,7 @@
 	args := []string{"--iptables=false", "--swarm-default-advertise-addr=lo"} // avoid networking conflicts
 	d.StartWithBusybox(c, args...)
 
-	if joinSwarm == true {
+	if joinSwarm {
 		if len(s.daemons) > 0 {
 			tokens := s.daemons[0].JoinTokens(c)
 			token := tokens.Worker
@@ -468,7 +477,7 @@
 }
 
 func (ps *DockerPluginSuite) SetUpSuite(c *check.C) {
-	testRequires(c, DaemonIsLinux)
+	testRequires(c, DaemonIsLinux, registry.Hosting)
 	ps.registry = setupRegistry(c, false, "", "")
 
 	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
diff --git a/integration-cli/cli/build/fakestorage/fixtures.go b/integration-cli/cli/build/fakestorage/fixtures.go
index 8a6bb13..2eb7284 100644
--- a/integration-cli/cli/build/fakestorage/fixtures.go
+++ b/integration-cli/cli/build/fakestorage/fixtures.go
@@ -30,7 +30,7 @@
 	}
 	defer os.RemoveAll(tmp)
 
-	goos := testEnv.DaemonInfo.OSType
+	goos := testEnv.OSType
 	if goos == "" {
 		goos = "linux"
 	}
@@ -39,27 +39,34 @@
 		goarch = "amd64"
 	}
 
-	goCmd, lookErr := exec.LookPath("go")
-	if lookErr != nil {
-		t.Fatalf("could not build http server: %v", lookErr)
-	}
-
-	cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver")
-	cmd.Env = append(os.Environ(), []string{
-		"CGO_ENABLED=0",
-		"GOOS=" + goos,
-		"GOARCH=" + goarch,
-	}...)
-	var out []byte
-	if out, err = cmd.CombinedOutput(); err != nil {
-		t.Fatalf("could not build http server: %s", string(out))
-	}
-
 	cpCmd, lookErr := exec.LookPath("cp")
 	if lookErr != nil {
 		t.Fatalf("could not build http server: %v", lookErr)
 	}
-	if out, err = exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil {
+
+	if _, err = os.Stat("../contrib/httpserver/httpserver"); os.IsNotExist(err) {
+		goCmd, lookErr := exec.LookPath("go")
+		if lookErr != nil {
+			t.Fatalf("could not build http server: %v", lookErr)
+		}
+
+		cmd := exec.Command(goCmd, "build", "-o", filepath.Join(tmp, "httpserver"), "github.com/docker/docker/contrib/httpserver")
+		cmd.Env = append(os.Environ(), []string{
+			"CGO_ENABLED=0",
+			"GOOS=" + goos,
+			"GOARCH=" + goarch,
+		}...)
+		var out []byte
+		if out, err = cmd.CombinedOutput(); err != nil {
+			t.Fatalf("could not build http server: %s", string(out))
+		}
+	} else {
+		if out, err := exec.Command(cpCmd, "../contrib/httpserver/httpserver", filepath.Join(tmp, "httpserver")).CombinedOutput(); err != nil {
+			t.Fatalf("could not copy http server: %v", string(out))
+		}
+	}
+
+	if out, err := exec.Command(cpCmd, "../contrib/httpserver/Dockerfile", filepath.Join(tmp, "Dockerfile")).CombinedOutput(); err != nil {
 		t.Fatalf("could not build http server: %v", string(out))
 	}
 
diff --git a/integration-cli/cli/build/fakestorage/storage.go b/integration-cli/cli/build/fakestorage/storage.go
index 25cd872..eb03636 100644
--- a/integration-cli/cli/build/fakestorage/storage.go
+++ b/integration-cli/cli/build/fakestorage/storage.go
@@ -14,7 +14,7 @@
 	"github.com/docker/docker/integration-cli/cli/build/fakecontext"
 	"github.com/docker/docker/integration-cli/request"
 	"github.com/docker/docker/internal/test/environment"
-	"github.com/docker/docker/pkg/stringutils"
+	"github.com/docker/docker/internal/testutil"
 	"github.com/stretchr/testify/require"
 )
 
@@ -124,8 +124,8 @@
 
 func newRemoteFileServer(t testingT, ctx *fakecontext.Fake) *remoteFileServer {
 	var (
-		image     = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10)))
-		container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10)))
+		image     = fmt.Sprintf("fileserver-img-%s", strings.ToLower(testutil.GenerateRandomAlphaOnlyString(10)))
+		container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(testutil.GenerateRandomAlphaOnlyString(10)))
 	)
 
 	ensureHTTPServerImage(t)
diff --git a/integration-cli/cli/cli.go b/integration-cli/cli/cli.go
index b8230b2..813c3ad 100644
--- a/integration-cli/cli/cli.go
+++ b/integration-cli/cli/cli.go
@@ -115,7 +115,7 @@
 // validateArgs is a checker to ensure tests are not running commands which are
 // not supported on platforms. Specifically on Windows this is 'busybox top'.
 func validateArgs(args ...string) error {
-	if testEnv.DaemonInfo.OSType != "windows" {
+	if testEnv.OSType != "windows" {
 		return nil
 	}
 	foundBusybox := -1
diff --git a/integration-cli/daemon/daemon.go b/integration-cli/daemon/daemon.go
index 06bf504..f6ad655 100644
--- a/integration-cli/daemon/daemon.go
+++ b/integration-cli/daemon/daemon.go
@@ -222,7 +222,7 @@
 		return errors.Wrapf(err, "[%s] could not find docker binary in $PATH", d.id)
 	}
 	args := append(d.GlobalFlags,
-		"--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock",
+		"--containerd", "/var/run/docker/containerd/docker-containerd.sock",
 		"--data-root", d.Root,
 		"--exec-root", d.execRoot,
 		"--pidfile", fmt.Sprintf("%s/docker.pid", d.Folder),
@@ -457,6 +457,8 @@
 		return err
 	}
 
+	d.cmd.Wait()
+
 	if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.Folder)); err != nil {
 		return err
 	}
diff --git a/integration-cli/daemon/daemon_swarm.go b/integration-cli/daemon/daemon_swarm.go
index c37c726..bd1ada0 100644
--- a/integration-cli/daemon/daemon_swarm.go
+++ b/integration-cli/daemon/daemon_swarm.go
@@ -198,6 +198,23 @@
 	}
 }
 
+// CheckServiceTasksInStateWithError returns the number of tasks with a matching state,
+// optionally filtered to tasks whose error message contains errorMessage (an empty string matches all).
+func (d *Swarm) CheckServiceTasksInStateWithError(service string, state swarm.TaskState, errorMessage string) func(*check.C) (interface{}, check.CommentInterface) {
+	return func(c *check.C) (interface{}, check.CommentInterface) {
+		tasks := d.GetServiceTasks(c, service)
+		var count int
+		for _, task := range tasks {
+			if task.Status.State == state {
+				if errorMessage == "" || strings.Contains(task.Status.Err, errorMessage) {
+					count++
+				}
+			}
+		}
+		return count, nil
+	}
+}
+
 // CheckServiceRunningTasks returns the number of running tasks for the specified service
 func (d *Swarm) CheckServiceRunningTasks(service string) func(*check.C) (interface{}, check.CommentInterface) {
 	return d.CheckServiceTasksInState(service, swarm.TaskStateRunning, "")
diff --git a/integration-cli/docker_api_build_test.go b/integration-cli/docker_api_build_test.go
index 59b4510..3f43c95 100644
--- a/integration-cli/docker_api_build_test.go
+++ b/integration-cli/docker_api_build_test.go
@@ -28,6 +28,7 @@
 
 func (s *DockerSuite) TestBuildAPIDockerFileRemote(c *check.C) {
 	testRequires(c, NotUserNamespace)
+
 	var testD string
 	if testEnv.DaemonPlatform() == "windows" {
 		testD = `FROM busybox
@@ -438,6 +439,82 @@
 	assert.Contains(c, string(out), "Successfully built")
 }
 
+func (s *DockerSuite) TestBuildCopyCacheOnFileChange(c *check.C) {
+
+	dockerfile := `FROM busybox
+COPY file /file`
+
+	ctx1 := fakecontext.New(c, "",
+		fakecontext.WithDockerfile(dockerfile),
+		fakecontext.WithFile("file", "foo"))
+	ctx2 := fakecontext.New(c, "",
+		fakecontext.WithDockerfile(dockerfile),
+		fakecontext.WithFile("file", "bar"))
+
+	var build = func(ctx *fakecontext.Fake) string {
+		res, body, err := request.Post("/build",
+			request.RawContent(ctx.AsTarReader(c)),
+			request.ContentType("application/x-tar"))
+
+		require.NoError(c, err)
+		assert.Equal(c, http.StatusOK, res.StatusCode)
+
+		out, err := request.ReadBody(body)
+
+		ids := getImageIDsFromBuild(c, out)
+		return ids[len(ids)-1]
+	}
+
+	id1 := build(ctx1)
+	id2 := build(ctx1)
+	id3 := build(ctx2)
+
+	if id1 != id2 {
+		c.Fatal("didn't use the cache")
+	}
+	if id1 == id3 {
+		c.Fatal("COPY with different source file should not share same cache")
+	}
+}
+
+func (s *DockerSuite) TestBuildAddCacheOnFileChange(c *check.C) {
+
+	dockerfile := `FROM busybox
+ADD file /file`
+
+	ctx1 := fakecontext.New(c, "",
+		fakecontext.WithDockerfile(dockerfile),
+		fakecontext.WithFile("file", "foo"))
+	ctx2 := fakecontext.New(c, "",
+		fakecontext.WithDockerfile(dockerfile),
+		fakecontext.WithFile("file", "bar"))
+
+	var build = func(ctx *fakecontext.Fake) string {
+		res, body, err := request.Post("/build",
+			request.RawContent(ctx.AsTarReader(c)),
+			request.ContentType("application/x-tar"))
+
+		require.NoError(c, err)
+		assert.Equal(c, http.StatusOK, res.StatusCode)
+
+		out, err := request.ReadBody(body)
+
+		ids := getImageIDsFromBuild(c, out)
+		return ids[len(ids)-1]
+	}
+
+	id1 := build(ctx1)
+	id2 := build(ctx1)
+	id3 := build(ctx2)
+
+	if id1 != id2 {
+		c.Fatal("didn't use the cache")
+	}
+	if id1 == id3 {
+		c.Fatal("ADD with different source file should not share same cache")
+	}
+}
+
 func (s *DockerSuite) TestBuildWithSession(c *check.C) {
 	testRequires(c, ExperimentalDaemon)
 
@@ -509,7 +586,9 @@
 	sess, err := session.NewSession("foo1", "foo")
 	assert.Nil(c, err)
 
-	fsProvider := filesync.NewFSSyncProvider(dir, nil)
+	fsProvider := filesync.NewFSSyncProvider([]filesync.SyncedDir{
+		{Dir: dir},
+	})
 	sess.Allow(fsProvider)
 
 	g, ctx := errgroup.WithContext(context.Background())
@@ -519,7 +598,7 @@
 	})
 
 	g.Go(func() error {
-		res, body, err := request.Post("/build?remote=client-session&session="+sess.UUID(), func(req *http.Request) error {
+		res, body, err := request.Post("/build?remote=client-session&session="+sess.ID(), func(req *http.Request) error {
 			req.Body = ioutil.NopCloser(strings.NewReader(dockerfile))
 			return nil
 		})
@@ -540,6 +619,28 @@
 	return
 }
 
+func (s *DockerSuite) TestBuildScratchCopy(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+	dockerfile := `FROM scratch
+ADD Dockerfile /
+ENV foo bar`
+	ctx := fakecontext.New(c, "",
+		fakecontext.WithDockerfile(dockerfile),
+	)
+	defer ctx.Close()
+
+	res, body, err := request.Post(
+		"/build",
+		request.RawContent(ctx.AsTarReader(c)),
+		request.ContentType("application/x-tar"))
+	c.Assert(err, checker.IsNil)
+	c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
+
+	out, err := request.ReadBody(body)
+	require.NoError(c, err)
+	assert.Contains(c, string(out), "Successfully built")
+}
+
 type buildLine struct {
 	Stream string
 	Aux    struct {
diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go
index 554d687..cd3566f 100644
--- a/integration-cli/docker_api_containers_test.go
+++ b/integration-cli/docker_api_containers_test.go
@@ -11,6 +11,7 @@
 	"os"
 	"path/filepath"
 	"regexp"
+	"runtime"
 	"strconv"
 	"strings"
 	"time"
@@ -30,6 +31,9 @@
 	"github.com/docker/docker/volume"
 	"github.com/docker/go-connections/nat"
 	"github.com/go-check/check"
+	"github.com/gotestyourself/gotestyourself/poll"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 	"golang.org/x/net/context"
 )
 
@@ -68,7 +72,6 @@
 	c.Assert(err, checker.IsNil)
 	c.Assert(containers, checker.HasLen, startCount+1)
 	actual := fmt.Sprintf("%+v", containers[0])
-	fmt.Println(actual)
 
 	// empty Labels field triggered this bug, make sense to check for everything
 	// cause even Ports for instance can trigger this bug
@@ -1369,8 +1372,7 @@
 		Image: "busybox",
 	}
 
-	var httpClient *http.Client
-	cli, err := client.NewClient(daemonHost(), "v1.18", httpClient, map[string]string{})
+	cli, err := request.NewEnvClientWithVersion("v1.18")
 
 	_, err = cli.ContainerCreate(context.Background(), &config, &containertypes.HostConfig{}, &networktypes.NetworkingConfig{}, "")
 	c.Assert(err, checker.IsNil)
@@ -1406,16 +1408,6 @@
 	c.Assert(err.Error(), checker.Contains, "container rootfs is marked read-only")
 }
 
-func (s *DockerSuite) TestContainerAPIGetContainersJSONEmpty(c *check.C) {
-	cli, err := client.NewEnvClient()
-	c.Assert(err, checker.IsNil)
-	defer cli.Close()
-
-	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true})
-	c.Assert(err, checker.IsNil)
-	c.Assert(containers, checker.HasLen, 0)
-}
-
 func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *check.C) {
 	// Not supported on Windows
 	testRequires(c, DaemonIsLinux)
@@ -1614,7 +1606,7 @@
 	defer cli.Close()
 
 	err = cli.ContainerRemove(context.Background(), "", types.ContainerRemoveOptions{})
-	c.Assert(err.Error(), checker.Contains, "Error response from daemon: page not found")
+	c.Assert(err.Error(), checker.Contains, "No such container")
 }
 
 func (s *DockerSuite) TestContainerAPIStatsWithNetworkDisabled(c *check.C) {
@@ -1910,18 +1902,38 @@
 	}
 
 	type testCase struct {
-		cfg      mounttypes.Mount
+		spec     mounttypes.Mount
 		expected types.MountPoint
 	}
 
+	var selinuxSharedLabel string
+	if runtime.GOOS == "linux" {
+		selinuxSharedLabel = "z"
+	}
+
 	cases := []testCase{
 		// use literal strings here for `Type` instead of the defined constants in the volume package to keep this honest
 		// Validation of the actual `Mount` struct is done in another test is not needed here
-		{mounttypes.Mount{Type: "volume", Target: destPath}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}},
-		{mounttypes.Mount{Type: "volume", Target: destPath + slash}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}},
-		{mounttypes.Mount{Type: "volume", Target: destPath, Source: "test1"}, types.MountPoint{Type: "volume", Name: "test1", RW: true, Destination: destPath}},
-		{mounttypes.Mount{Type: "volume", Target: destPath, ReadOnly: true, Source: "test2"}, types.MountPoint{Type: "volume", Name: "test2", RW: false, Destination: destPath}},
-		{mounttypes.Mount{Type: "volume", Target: destPath, Source: "test3", VolumeOptions: &mounttypes.VolumeOptions{DriverConfig: &mounttypes.Driver{Name: volume.DefaultDriverName}}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", Name: "test3", RW: true, Destination: destPath}},
+		{
+			spec:     mounttypes.Mount{Type: "volume", Target: destPath},
+			expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
+		},
+		{
+			spec:     mounttypes.Mount{Type: "volume", Target: destPath + slash},
+			expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
+		},
+		{
+			spec:     mounttypes.Mount{Type: "volume", Target: destPath, Source: "test1"},
+			expected: types.MountPoint{Type: "volume", Name: "test1", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
+		},
+		{
+			spec:     mounttypes.Mount{Type: "volume", Target: destPath, ReadOnly: true, Source: "test2"},
+			expected: types.MountPoint{Type: "volume", Name: "test2", RW: false, Destination: destPath, Mode: selinuxSharedLabel},
+		},
+		{
+			spec:     mounttypes.Mount{Type: "volume", Target: destPath, Source: "test3", VolumeOptions: &mounttypes.VolumeOptions{DriverConfig: &mounttypes.Driver{Name: volume.DefaultDriverName}}},
+			expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", Name: "test3", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
+		},
 	}
 
 	if SameHostDaemon() {
@@ -1930,8 +1942,23 @@
 		c.Assert(err, checker.IsNil)
 		defer os.RemoveAll(tmpDir1)
 		cases = append(cases, []testCase{
-			{mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir1}},
-			{mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir1}},
+			{
+				spec: mounttypes.Mount{
+					Type:   "bind",
+					Source: tmpDir1,
+					Target: destPath,
+				},
+				expected: types.MountPoint{
+					Type:        "bind",
+					RW:          true,
+					Destination: destPath,
+					Source:      tmpDir1,
+				},
+			},
+			{
+				spec:     mounttypes.Mount{Type: "bind", Source: tmpDir1, Target: destPath, ReadOnly: true},
+				expected: types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir1},
+			},
 		}...)
 
 		// for modes only supported on Linux
@@ -1944,19 +1971,40 @@
 			c.Assert(mount.ForceMount("", tmpDir3, "none", "shared"), checker.IsNil)
 
 			cases = append(cases, []testCase{
-				{mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath}, types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir3}},
-				{mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3}},
-				{mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true, BindOptions: &mounttypes.BindOptions{Propagation: "shared"}}, types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3, Propagation: "shared"}},
+				{
+					spec:     mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath},
+					expected: types.MountPoint{Type: "bind", RW: true, Destination: destPath, Source: tmpDir3},
+				},
+				{
+					spec:     mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true},
+					expected: types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3},
+				},
+				{
+					spec:     mounttypes.Mount{Type: "bind", Source: tmpDir3, Target: destPath, ReadOnly: true, BindOptions: &mounttypes.BindOptions{Propagation: "shared"}},
+					expected: types.MountPoint{Type: "bind", RW: false, Destination: destPath, Source: tmpDir3, Propagation: "shared"},
+				},
 			}...)
 		}
 	}
 
 	if testEnv.DaemonPlatform() != "windows" { // Windows does not support volume populate
 		cases = append(cases, []testCase{
-			{mounttypes.Mount{Type: "volume", Target: destPath, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}},
-			{mounttypes.Mount{Type: "volume", Target: destPath + slash, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath}},
-			{mounttypes.Mount{Type: "volume", Target: destPath, Source: "test4", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test4", RW: true, Destination: destPath}},
-			{mounttypes.Mount{Type: "volume", Target: destPath, Source: "test5", ReadOnly: true, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}}, types.MountPoint{Type: "volume", Name: "test5", RW: false, Destination: destPath}},
+			{
+				spec:     mounttypes.Mount{Type: "volume", Target: destPath, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}},
+				expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
+			},
+			{
+				spec:     mounttypes.Mount{Type: "volume", Target: destPath + slash, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}},
+				expected: types.MountPoint{Driver: volume.DefaultDriverName, Type: "volume", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
+			},
+			{
+				spec:     mounttypes.Mount{Type: "volume", Target: destPath, Source: "test4", VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}},
+				expected: types.MountPoint{Type: "volume", Name: "test4", RW: true, Destination: destPath, Mode: selinuxSharedLabel},
+			},
+			{
+				spec:     mounttypes.Mount{Type: "volume", Target: destPath, Source: "test5", ReadOnly: true, VolumeOptions: &mounttypes.VolumeOptions{NoCopy: true}},
+				expected: types.MountPoint{Type: "volume", Name: "test5", RW: false, Destination: destPath, Mode: selinuxSharedLabel},
+			},
 		}...)
 	}
 
@@ -1968,58 +2016,83 @@
 		ID string `json:"Id"`
 	}
 
-	cli, err := client.NewEnvClient()
-	c.Assert(err, checker.IsNil)
-	defer cli.Close()
-
+	ctx := context.Background()
+	apiclient := testEnv.APIClient()
 	for i, x := range cases {
-		c.Logf("case %d - config: %v", i, x.cfg)
-		container, err := cli.ContainerCreate(context.Background(), &containertypes.Config{Image: testImg}, &containertypes.HostConfig{Mounts: []mounttypes.Mount{x.cfg}}, &networktypes.NetworkingConfig{}, "")
-		c.Assert(err, checker.IsNil)
+		c.Logf("case %d - config: %v", i, x.spec)
+		container, err := apiclient.ContainerCreate(
+			ctx,
+			&containertypes.Config{Image: testImg},
+			&containertypes.HostConfig{Mounts: []mounttypes.Mount{x.spec}},
+			&networktypes.NetworkingConfig{},
+			"")
+		require.NoError(c, err)
 
-		id := container.ID
+		containerInspect, err := apiclient.ContainerInspect(ctx, container.ID)
+		require.NoError(c, err)
+		mps := containerInspect.Mounts
+		require.Len(c, mps, 1)
+		mountPoint := mps[0]
 
-		var mps []types.MountPoint
-		err = json.NewDecoder(strings.NewReader(inspectFieldJSON(c, id, "Mounts"))).Decode(&mps)
-		c.Assert(err, checker.IsNil)
-		c.Assert(mps, checker.HasLen, 1)
-		c.Assert(mps[0].Destination, checker.Equals, x.expected.Destination)
+		if x.expected.Source != "" {
+			assert.Equal(c, x.expected.Source, mountPoint.Source)
+		}
+		if x.expected.Name != "" {
+			assert.Equal(c, x.expected.Name, mountPoint.Name)
+		}
+		if x.expected.Driver != "" {
+			assert.Equal(c, x.expected.Driver, mountPoint.Driver)
+		}
+		if x.expected.Propagation != "" {
+			assert.Equal(c, x.expected.Propagation, mountPoint.Propagation)
+		}
+		assert.Equal(c, x.expected.RW, mountPoint.RW)
+		assert.Equal(c, x.expected.Type, mountPoint.Type)
+		assert.Equal(c, x.expected.Mode, mountPoint.Mode)
+		assert.Equal(c, x.expected.Destination, mountPoint.Destination)
 
-		if len(x.expected.Source) > 0 {
-			c.Assert(mps[0].Source, checker.Equals, x.expected.Source)
-		}
-		if len(x.expected.Name) > 0 {
-			c.Assert(mps[0].Name, checker.Equals, x.expected.Name)
-		}
-		if len(x.expected.Driver) > 0 {
-			c.Assert(mps[0].Driver, checker.Equals, x.expected.Driver)
-		}
-		c.Assert(mps[0].RW, checker.Equals, x.expected.RW)
-		c.Assert(mps[0].Type, checker.Equals, x.expected.Type)
-		c.Assert(mps[0].Mode, checker.Equals, x.expected.Mode)
-		if len(x.expected.Propagation) > 0 {
-			c.Assert(mps[0].Propagation, checker.Equals, x.expected.Propagation)
-		}
+		err = apiclient.ContainerStart(ctx, container.ID, types.ContainerStartOptions{})
+		require.NoError(c, err)
+		poll.WaitOn(c, containerExit(apiclient, container.ID), poll.WithDelay(time.Second))
 
-		out, _, err := dockerCmdWithError("start", "-a", id)
-		if (x.cfg.Type != "volume" || (x.cfg.VolumeOptions != nil && x.cfg.VolumeOptions.NoCopy)) && testEnv.DaemonPlatform() != "windows" {
-			c.Assert(err, checker.NotNil, check.Commentf("%s\n%v", out, mps[0]))
-		} else {
-			c.Assert(err, checker.IsNil, check.Commentf("%s\n%v", out, mps[0]))
-		}
+		err = apiclient.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{
+			RemoveVolumes: true,
+			Force:         true,
+		})
+		require.NoError(c, err)
 
-		dockerCmd(c, "rm", "-fv", id)
-		if x.cfg.Type == "volume" && len(x.cfg.Source) > 0 {
-			// This should still exist even though we removed the container
-			dockerCmd(c, "volume", "inspect", mps[0].Name)
-		} else {
-			// This should be removed automatically when we removed the container
-			out, _, err := dockerCmdWithError("volume", "inspect", mps[0].Name)
-			c.Assert(err, checker.NotNil, check.Commentf(out))
+		switch {
+
+		// Named volumes still exist after the container is removed
+		case x.spec.Type == "volume" && len(x.spec.Source) > 0:
+			_, err := apiclient.VolumeInspect(ctx, mountPoint.Name)
+			require.NoError(c, err)
+
+		// Bind mounts are never removed with the container
+		case x.spec.Type == "bind":
+
+		// anonymous volumes are removed
+		default:
+			_, err := apiclient.VolumeInspect(ctx, mountPoint.Name)
+			assert.True(c, client.IsErrNotFound(err))
 		}
 	}
 }
 
+func containerExit(apiclient client.APIClient, name string) func(poll.LogT) poll.Result {
+	return func(logT poll.LogT) poll.Result {
+		container, err := apiclient.ContainerInspect(context.Background(), name)
+		if err != nil {
+			return poll.Error(err)
+		}
+		switch container.State.Status {
+		case "created", "running":
+			return poll.Continue("container %s is %s, waiting for exit", name, container.State.Status)
+		}
+		return poll.Success()
+	}
+}
+
 func (s *DockerSuite) TestContainersAPICreateMountsTmpfs(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 	type testCase struct {
diff --git a/integration-cli/docker_api_containers_unix_test.go b/integration-cli/docker_api_containers_unix_test.go
new file mode 100644
index 0000000..4964f52
--- /dev/null
+++ b/integration-cli/docker_api_containers_unix_test.go
@@ -0,0 +1,77 @@
+// +build !windows
+
+package main
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/api/types"
+	containertypes "github.com/docker/docker/api/types/container"
+	mounttypes "github.com/docker/docker/api/types/mount"
+	networktypes "github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/client"
+	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/system"
+	"github.com/go-check/check"
+	"github.com/stretchr/testify/assert"
+	"golang.org/x/net/context"
+)
+
+func (s *DockerSuite) TestContainersAPINetworkMountsNoChown(c *check.C) {
+	// chown only applies to Linux bind mounted volumes; must be same host to verify
+	testRequires(c, DaemonIsLinux, SameHostDaemon)
+
+	tmpDir, err := ioutils.TempDir("", "test-network-mounts")
+	c.Assert(err, checker.IsNil)
+	defer os.RemoveAll(tmpDir)
+
+	// make tmp dir readable by anyone to allow userns process to mount from
+	err = os.Chmod(tmpDir, 0755)
+	c.Assert(err, checker.IsNil)
+	// create temp files to use as network mounts
+	tmpNWFileMount := filepath.Join(tmpDir, "nwfile")
+
+	err = ioutil.WriteFile(tmpNWFileMount, []byte("network file bind mount"), 0644)
+	c.Assert(err, checker.IsNil)
+
+	config := containertypes.Config{
+		Image: "busybox",
+	}
+	hostConfig := containertypes.HostConfig{
+		Mounts: []mounttypes.Mount{
+			{
+				Type:   "bind",
+				Source: tmpNWFileMount,
+				Target: "/etc/resolv.conf",
+			},
+			{
+				Type:   "bind",
+				Source: tmpNWFileMount,
+				Target: "/etc/hostname",
+			},
+			{
+				Type:   "bind",
+				Source: tmpNWFileMount,
+				Target: "/etc/hosts",
+			},
+		},
+	}
+
+	cli, err := client.NewEnvClient()
+	c.Assert(err, checker.IsNil)
+	defer cli.Close()
+
+	ctrCreate, err := cli.ContainerCreate(context.Background(), &config, &hostConfig, &networktypes.NetworkingConfig{}, "")
+	c.Assert(err, checker.IsNil)
+	// container will exit immediately because of no tty, but we only need the start sequence to test the condition
+	err = cli.ContainerStart(context.Background(), ctrCreate.ID, types.ContainerStartOptions{})
+	c.Assert(err, checker.IsNil)
+
+	// check that host-located bind mount network file did not change ownership when the container was started
+	statT, err := system.Stat(tmpNWFileMount)
+	c.Assert(err, checker.IsNil)
+	assert.Equal(c, uint32(0), statT.UID(), "bind mounted network file should not change ownership from root")
+}
diff --git a/integration-cli/docker_api_images_test.go b/integration-cli/docker_api_images_test.go
index 8ad12fb..fba69dc 100644
--- a/integration-cli/docker_api_images_test.go
+++ b/integration-cli/docker_api_images_test.go
@@ -119,7 +119,7 @@
 }
 
 func (s *DockerSuite) TestAPIImagesImportBadSrc(c *check.C) {
-	testRequires(c, Network)
+	testRequires(c, Network, SameHostDaemon)
 
 	server := httptest.NewServer(http.NewServeMux())
 	defer server.Close()
@@ -179,8 +179,7 @@
 		Labels      map[string]string
 	}
 
-	var httpClient *http.Client
-	cli, err = client.NewClient(daemonHost(), "v1.24", httpClient, nil)
+	cli, err = request.NewEnvClientWithVersion("v1.24")
 	c.Assert(err, checker.IsNil)
 	defer cli.Close()
 
diff --git a/integration-cli/docker_api_inspect_unix_test.go b/integration-cli/docker_api_inspect_unix_test.go
index 93c4094..dae64be 100644
--- a/integration-cli/docker_api_inspect_unix_test.go
+++ b/integration-cli/docker_api_inspect_unix_test.go
@@ -4,10 +4,9 @@
 
 import (
 	"encoding/json"
-	"net/http"
 
-	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/checker"
+	"github.com/docker/docker/integration-cli/request"
 	"github.com/go-check/check"
 	"golang.org/x/net/context"
 )
@@ -19,8 +18,7 @@
 
 	name := "cpusetinconfig-pre120"
 	dockerCmd(c, "run", "--name", name, "--cpuset-cpus", "0", "busybox", "true")
-	var httpClient *http.Client
-	cli, err := client.NewClient(daemonHost(), "v1.19", httpClient, nil)
+	cli, err := request.NewEnvClientWithVersion("v1.19")
 	c.Assert(err, checker.IsNil)
 	defer cli.Close()
 	_, body, err := cli.ContainerInspectWithRaw(context.Background(), name, false)
diff --git a/integration-cli/docker_api_ipcmode_test.go b/integration-cli/docker_api_ipcmode_test.go
index 07fbbca..0254887 100644
--- a/integration-cli/docker_api_ipcmode_test.go
+++ b/integration-cli/docker_api_ipcmode_test.go
@@ -44,11 +44,7 @@
 		}
 	}
 
-	if err := s.Err(); err != nil {
-		return false, err
-	}
-
-	return false, nil
+	return false, s.Err()
 }
 
 // testIpcNonePrivateShareable is a helper function to test "none",
diff --git a/integration-cli/docker_api_logs_test.go b/integration-cli/docker_api_logs_test.go
index 1f2a30a..0672e32 100644
--- a/integration-cli/docker_api_logs_test.go
+++ b/integration-cli/docker_api_logs_test.go
@@ -2,8 +2,12 @@
 
 import (
 	"bufio"
+	"bytes"
 	"fmt"
+	"io"
+	"io/ioutil"
 	"net/http"
+	"strconv"
 	"strings"
 	"time"
 
@@ -11,6 +15,7 @@
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/checker"
 	"github.com/docker/docker/integration-cli/request"
+	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/go-check/check"
 	"golang.org/x/net/context"
 )
@@ -85,3 +90,125 @@
 	c.Assert(err, checker.IsNil)
 	c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound)
 }
+
+func (s *DockerSuite) TestLogsAPIUntilFutureFollow(c *check.C) {
+	testRequires(c, DaemonIsLinux)
+
+	name := "logsuntilfuturefollow"
+	dockerCmd(c, "run", "-d", "--name", name, "busybox", "/bin/sh", "-c", "while true; do date +%s; sleep 1; done")
+	c.Assert(waitRun(name), checker.IsNil)
+
+	untilSecs := 5
+	untilDur, err := time.ParseDuration(fmt.Sprintf("%ds", untilSecs))
+	c.Assert(err, checker.IsNil)
+	until := daemonTime(c).Add(untilDur)
+
+	client, err := request.NewClient()
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	cfg := types.ContainerLogsOptions{Until: until.Format(time.RFC3339Nano), Follow: true, ShowStdout: true, Timestamps: true}
+	reader, err := client.ContainerLogs(context.Background(), name, cfg)
+	c.Assert(err, checker.IsNil)
+
+	type logOut struct {
+		out string
+		err error
+	}
+
+	chLog := make(chan logOut)
+
+	go func() {
+		bufReader := bufio.NewReader(reader)
+		defer reader.Close()
+		for i := 0; i < untilSecs; i++ {
+			out, _, err := bufReader.ReadLine()
+			if err != nil {
+				if err == io.EOF {
+					return
+				}
+				chLog <- logOut{"", err}
+				return
+			}
+
+			chLog <- logOut{strings.TrimSpace(string(out)), err}
+		}
+	}()
+
+	for i := 0; i < untilSecs; i++ {
+		select {
+		case l := <-chLog:
+			c.Assert(l.err, checker.IsNil)
+			i, err := strconv.ParseInt(strings.Split(l.out, " ")[1], 10, 64)
+			c.Assert(err, checker.IsNil)
+			c.Assert(time.Unix(i, 0).UnixNano(), checker.LessOrEqualThan, until.UnixNano())
+		case <-time.After(20 * time.Second):
+			c.Fatal("timeout waiting for logs to exit")
+		}
+	}
+}
+
+func (s *DockerSuite) TestLogsAPIUntil(c *check.C) {
+	name := "logsuntil"
+	dockerCmd(c, "run", "--name", name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do echo log$i; sleep 0.5; done")
+
+	client, err := request.NewClient()
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	extractBody := func(c *check.C, cfg types.ContainerLogsOptions) []string {
+		reader, err := client.ContainerLogs(context.Background(), name, cfg)
+		c.Assert(err, checker.IsNil)
+
+		actualStdout := new(bytes.Buffer)
+		actualStderr := ioutil.Discard
+		_, err = stdcopy.StdCopy(actualStdout, actualStderr, reader)
+		c.Assert(err, checker.IsNil)
+
+		return strings.Split(actualStdout.String(), "\n")
+	}
+
+	// Get timestamp of second log line
+	allLogs := extractBody(c, types.ContainerLogsOptions{Timestamps: true, ShowStdout: true})
+	t, err := time.Parse(time.RFC3339Nano, strings.Split(allLogs[1], " ")[0])
+	c.Assert(err, checker.IsNil)
+	until := t.Format(time.RFC3339Nano)
+
+	// Get logs until the timestamp of second line, i.e. first two lines
+	logs := extractBody(c, types.ContainerLogsOptions{Timestamps: true, ShowStdout: true, Until: until})
+
+	// Ensure log lines after cut-off are excluded
+	logsString := strings.Join(logs, "\n")
+	c.Assert(logsString, checker.Not(checker.Contains), "log3", check.Commentf("unexpected log message returned, until=%v", until))
+}
+
+func (s *DockerSuite) TestLogsAPIUntilDefaultValue(c *check.C) {
+	name := "logsuntildefaultval"
+	dockerCmd(c, "run", "--name", name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do echo log$i; done")
+
+	client, err := request.NewClient()
+	if err != nil {
+		c.Fatal(err)
+	}
+
+	extractBody := func(c *check.C, cfg types.ContainerLogsOptions) []string {
+		reader, err := client.ContainerLogs(context.Background(), name, cfg)
+		c.Assert(err, checker.IsNil)
+
+		actualStdout := new(bytes.Buffer)
+		actualStderr := ioutil.Discard
+		_, err = stdcopy.StdCopy(actualStdout, actualStderr, reader)
+		c.Assert(err, checker.IsNil)
+
+		return strings.Split(actualStdout.String(), "\n")
+	}
+
+	// Get timestamp of second log line
+	allLogs := extractBody(c, types.ContainerLogsOptions{Timestamps: true, ShowStdout: true})
+
+	// Test with default value specified and parameter omitted
+	defaultLogs := extractBody(c, types.ContainerLogsOptions{Timestamps: true, ShowStdout: true, Until: "0"})
+	c.Assert(defaultLogs, checker.DeepEquals, allLogs)
+}
diff --git a/integration-cli/docker_api_network_test.go b/integration-cli/docker_api_network_test.go
index 129ec7e..2cdd262 100644
--- a/integration-cli/docker_api_network_test.go
+++ b/integration-cli/docker_api_network_test.go
@@ -35,7 +35,7 @@
 			CheckDuplicate: true,
 		},
 	}
-	id := createNetwork(c, config, true)
+	id := createNetwork(c, config, http.StatusCreated)
 	c.Assert(isNetworkAvailable(c, name), checker.Equals, true)
 
 	// delete the network and make sure it is deleted
@@ -60,14 +60,14 @@
 	}
 
 	// Creating a new network first
-	createNetwork(c, configOnCheck, true)
+	createNetwork(c, configOnCheck, http.StatusCreated)
 	c.Assert(isNetworkAvailable(c, name), checker.Equals, true)
 
 	// Creating another network with same name and CheckDuplicate must fail
-	createNetwork(c, configOnCheck, false)
+	createNetwork(c, configOnCheck, http.StatusConflict)
 
 	// Creating another network with same name and not CheckDuplicate must succeed
-	createNetwork(c, configNotCheck, true)
+	createNetwork(c, configNotCheck, http.StatusCreated)
 }
 
 func (s *DockerSuite) TestAPINetworkFilter(c *check.C) {
@@ -76,7 +76,7 @@
 	c.Assert(nr.Name, checker.Equals, "bridge")
 }
 
-func (s *DockerSuite) TestAPINetworkInspect(c *check.C) {
+func (s *DockerSuite) TestAPINetworkInspectBridge(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 	// Inspect default bridge network
 	nr := getNetworkResource(c, "bridge")
@@ -94,13 +94,15 @@
 	c.Assert(nr.Internal, checker.Equals, false)
 	c.Assert(nr.EnableIPv6, checker.Equals, false)
 	c.Assert(nr.IPAM.Driver, checker.Equals, "default")
-	c.Assert(len(nr.Containers), checker.Equals, 1)
 	c.Assert(nr.Containers[containerID], checker.NotNil)
 
 	ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address)
 	c.Assert(err, checker.IsNil)
 	c.Assert(ip.String(), checker.Equals, containerIP)
+}
 
+func (s *DockerSuite) TestAPINetworkInspectUserDefinedNetwork(c *check.C) {
+	testRequires(c, DaemonIsLinux)
 	// IPAM configuration inspect
 	ipam := &network.IPAM{
 		Driver: "default",
@@ -114,10 +116,10 @@
 			Options: map[string]string{"foo": "bar", "opts": "dopts"},
 		},
 	}
-	id0 := createNetwork(c, config, true)
+	id0 := createNetwork(c, config, http.StatusCreated)
 	c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, true)
 
-	nr = getNetworkResource(c, id0)
+	nr := getNetworkResource(c, id0)
 	c.Assert(len(nr.IPAM.Config), checker.Equals, 1)
 	c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16")
 	c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24")
@@ -137,7 +139,7 @@
 	config := types.NetworkCreateRequest{
 		Name: name,
 	}
-	id := createNetwork(c, config, true)
+	id := createNetwork(c, config, http.StatusCreated)
 	nr := getNetworkResource(c, id)
 	c.Assert(nr.Name, checker.Equals, name)
 	c.Assert(nr.ID, checker.Equals, id)
@@ -185,7 +187,7 @@
 			IPAM:   ipam0,
 		},
 	}
-	id0 := createNetwork(c, config0, true)
+	id0 := createNetwork(c, config0, http.StatusCreated)
 	c.Assert(isNetworkAvailable(c, "test0"), checker.Equals, true)
 
 	ipam1 := &network.IPAM{
@@ -200,7 +202,7 @@
 			IPAM:   ipam1,
 		},
 	}
-	createNetwork(c, config1, false)
+	createNetwork(c, config1, http.StatusForbidden)
 	c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, false)
 
 	ipam2 := &network.IPAM{
@@ -215,20 +217,20 @@
 			IPAM:   ipam2,
 		},
 	}
-	createNetwork(c, config2, true)
+	createNetwork(c, config2, http.StatusCreated)
 	c.Assert(isNetworkAvailable(c, "test2"), checker.Equals, true)
 
 	// remove test0 and retry to create test1
 	deleteNetwork(c, id0, true)
-	createNetwork(c, config1, true)
+	createNetwork(c, config1, http.StatusCreated)
 	c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, true)
 
 	// for networks w/o ipam specified, docker will choose proper non-overlapping subnets
-	createNetwork(c, types.NetworkCreateRequest{Name: "test3"}, true)
+	createNetwork(c, types.NetworkCreateRequest{Name: "test3"}, http.StatusCreated)
 	c.Assert(isNetworkAvailable(c, "test3"), checker.Equals, true)
-	createNetwork(c, types.NetworkCreateRequest{Name: "test4"}, true)
+	createNetwork(c, types.NetworkCreateRequest{Name: "test4"}, http.StatusCreated)
 	c.Assert(isNetworkAvailable(c, "test4"), checker.Equals, true)
-	createNetwork(c, types.NetworkCreateRequest{Name: "test5"}, true)
+	createNetwork(c, types.NetworkCreateRequest{Name: "test5"}, http.StatusCreated)
 	c.Assert(isNetworkAvailable(c, "test5"), checker.Equals, true)
 
 	for i := 1; i < 6; i++ {
@@ -251,9 +253,8 @@
 			CheckDuplicate: true,
 		},
 	}
-	shouldSucceed := false
-	createNetwork(c, config, shouldSucceed)
-	deleteNetwork(c, name, shouldSucceed)
+	createNetwork(c, config, http.StatusForbidden)
+	deleteNetwork(c, name, false)
 }
 
 func isNetworkAvailable(c *check.C, name string) bool {
@@ -280,7 +281,7 @@
 		filterArgs = filters.NewArgs()
 	)
 	filterArgs.Add("name", name)
-	filterJSON, err := filters.ToParam(filterArgs)
+	filterJSON, err := filters.ToJSON(filterArgs)
 	c.Assert(err, checker.IsNil)
 	v.Set("filters", filterJSON)
 
@@ -291,9 +292,16 @@
 	nJSON := []types.NetworkResource{}
 	err = json.NewDecoder(body).Decode(&nJSON)
 	c.Assert(err, checker.IsNil)
-	c.Assert(len(nJSON), checker.Equals, 1)
+	var res string
+	for _, n := range nJSON {
+		// Find exact match
+		if n.Name == name {
+			res = n.ID
+		}
+	}
+	c.Assert(res, checker.Not(checker.Equals), "")
 
-	return nJSON[0].ID
+	return res
 }
 
 func getNetworkResource(c *check.C, id string) *types.NetworkResource {
@@ -307,22 +315,22 @@
 	return &nr
 }
 
-func createNetwork(c *check.C, config types.NetworkCreateRequest, shouldSucceed bool) string {
+func createNetwork(c *check.C, config types.NetworkCreateRequest, expectedStatusCode int) string {
 	resp, body, err := request.Post("/networks/create", request.JSONBody(config))
 	c.Assert(err, checker.IsNil)
 	defer resp.Body.Close()
-	if !shouldSucceed {
-		c.Assert(resp.StatusCode, checker.Not(checker.Equals), http.StatusCreated)
+
+	c.Assert(resp.StatusCode, checker.Equals, expectedStatusCode)
+
+	if expectedStatusCode == http.StatusCreated {
+		var nr types.NetworkCreateResponse
+		err = json.NewDecoder(body).Decode(&nr)
+		c.Assert(err, checker.IsNil)
+
+		return nr.ID
+	} else {
 		return ""
 	}
-
-	c.Assert(resp.StatusCode, checker.Equals, http.StatusCreated)
-
-	var nr types.NetworkCreateResponse
-	err = json.NewDecoder(body).Decode(&nr)
-	c.Assert(err, checker.IsNil)
-
-	return nr.ID
 }
 
 func connectNetwork(c *check.C, nid, cid string) {
diff --git a/integration-cli/docker_api_stats_test.go b/integration-cli/docker_api_stats_test.go
index 2e8515a..7c9de1c 100644
--- a/integration-cli/docker_api_stats_test.go
+++ b/integration-cli/docker_api_stats_test.go
@@ -285,7 +285,7 @@
 	id2 := strings.TrimSpace(out2)
 	c.Assert(waitRun(id2), checker.IsNil)
 
-	ch := make(chan error)
+	ch := make(chan error, 1)
 	go func() {
 		resp, body, err := request.Get(fmt.Sprintf("/containers/%s/stats?stream=false", id2))
 		defer body.Close()
diff --git a/integration-cli/docker_api_swarm_config_test.go b/integration-cli/docker_api_swarm_config_test.go
index c06f3c4..c01d80f 100644
--- a/integration-cli/docker_api_swarm_config_test.go
+++ b/integration-cli/docker_api_swarm_config_test.go
@@ -55,10 +55,8 @@
 	c.Assert(err, checker.IsNil)
 	defer cli.Close()
 
-	expected := "no such config"
-
 	_, _, err = cli.ConfigInspectWithRaw(context.Background(), id)
-	c.Assert(err.Error(), checker.Contains, expected)
+	c.Assert(err.Error(), checker.Contains, "No such config")
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmConfigsUpdate(c *check.C) {
diff --git a/integration-cli/docker_api_swarm_secret_test.go b/integration-cli/docker_api_swarm_secret_test.go
index db346dd..c30e223 100644
--- a/integration-cli/docker_api_swarm_secret_test.go
+++ b/integration-cli/docker_api_swarm_secret_test.go
@@ -64,14 +64,12 @@
 	c.Assert(err, checker.IsNil)
 	defer cli.Close()
 
-	expected := "no such secret"
 	_, _, err = cli.SecretInspectWithRaw(context.Background(), id)
-	c.Assert(err.Error(), checker.Contains, expected)
+	c.Assert(err.Error(), checker.Contains, "No such secret")
 
 	id = "non-existing"
-	expected = "secret non-existing not found"
 	err = cli.SecretRemove(context.Background(), id)
-	c.Assert(err.Error(), checker.Contains, expected)
+	c.Assert(err.Error(), checker.Contains, "No such secret: non-existing")
 }
 
 func (s *DockerSwarmSuite) TestAPISwarmSecretsUpdate(c *check.C) {
diff --git a/integration-cli/docker_api_swarm_service_test.go b/integration-cli/docker_api_swarm_service_test.go
index a9563f2..845453d 100644
--- a/integration-cli/docker_api_swarm_service_test.go
+++ b/integration-cli/docker_api_swarm_service_test.go
@@ -186,7 +186,7 @@
 
 	// Roll back to the previous version. This uses the CLI because
 	// rollback used to be a client-side operation.
-	out, err := daemons[0].Cmd("service", "update", "--rollback", id)
+	out, err := daemons[0].Cmd("service", "update", "--detach", "--rollback", id)
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	// first batch
@@ -295,7 +295,7 @@
 
 	// Roll back to the previous version. This uses the CLI because
 	// rollback is a client-side operation.
-	out, err := d.Cmd("service", "update", "--rollback", id)
+	out, err := d.Cmd("service", "update", "--detach", "--rollback", id)
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	// first batch
@@ -340,7 +340,7 @@
 
 	// Roll back to the previous version. This uses the CLI because
 	// rollback used to be a client-side operation.
-	out, err := daemons[0].Cmd("service", "update", "--rollback", id)
+	out, err := daemons[0].Cmd("service", "update", "--detach", "--rollback", id)
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	waitAndAssert(c, defaultReconciliationTimeout, daemons[0].CheckRunningTaskImages, checker.DeepEquals,
diff --git a/integration-cli/docker_api_swarm_test.go b/integration-cli/docker_api_swarm_test.go
index 8f8b8eb..adb3885 100644
--- a/integration-cli/docker_api_swarm_test.go
+++ b/integration-cli/docker_api_swarm_test.go
@@ -370,8 +370,12 @@
 	cli, err := d1.NewClient()
 	c.Assert(err, checker.IsNil)
 	defer cli.Close()
-	_, err = cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{})
-	c.Assert(err.Error(), checker.Contains, "deadline exceeded")
+
+	// d1 will eventually step down as leader because there is no longer an active quorum; wait for that to happen
+	waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) {
+		_, err = cli.ServiceCreate(context.Background(), service.Spec, types.ServiceCreateOptions{})
+		return err.Error(), nil
+	}, checker.Contains, "Make sure more than half of the managers are online.")
 
 	d2.Start(c)
 
diff --git a/integration-cli/docker_api_version_test.go b/integration-cli/docker_api_version_test.go
deleted file mode 100644
index 0b995f7..0000000
--- a/integration-cli/docker_api_version_test.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package main
-
-import (
-	"github.com/docker/docker/client"
-	"github.com/docker/docker/dockerversion"
-	"github.com/docker/docker/integration-cli/checker"
-	"github.com/go-check/check"
-	"golang.org/x/net/context"
-)
-
-func (s *DockerSuite) TestGetVersion(c *check.C) {
-	cli, err := client.NewEnvClient()
-	c.Assert(err, checker.IsNil)
-	defer cli.Close()
-
-	v, err := cli.ServerVersion(context.Background())
-	c.Assert(v.Version, checker.Equals, dockerversion.Version, check.Commentf("Version mismatch"))
-}
diff --git a/integration-cli/docker_api_volumes_test.go b/integration-cli/docker_api_volumes_test.go
index 5e3d6a9..65a9652 100644
--- a/integration-cli/docker_api_volumes_test.go
+++ b/integration-cli/docker_api_volumes_test.go
@@ -16,16 +16,27 @@
 
 func (s *DockerSuite) TestVolumesAPIList(c *check.C) {
 	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
-	dockerCmd(c, "run", "-v", prefix+"/foo", "busybox")
+	cid, _ := dockerCmd(c, "run", "-d", "-v", prefix+"/foo", "busybox")
 
 	cli, err := client.NewEnvClient()
 	c.Assert(err, checker.IsNil)
 	defer cli.Close()
 
+	container, err := cli.ContainerInspect(context.Background(), strings.TrimSpace(cid))
+	c.Assert(err, checker.IsNil)
+	vname := container.Mounts[0].Name
+
 	volumes, err := cli.VolumeList(context.Background(), filters.Args{})
 	c.Assert(err, checker.IsNil)
 
-	c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes))
+	found := false
+	for _, vol := range volumes.Volumes {
+		if vol.Name == vname {
+			found = true
+			break
+		}
+	}
+	c.Assert(found, checker.Equals, true)
 }
 
 func (s *DockerSuite) TestVolumesAPICreate(c *check.C) {
@@ -45,21 +56,21 @@
 
 func (s *DockerSuite) TestVolumesAPIRemove(c *check.C) {
 	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
-	dockerCmd(c, "run", "-v", prefix+"/foo", "--name=test", "busybox")
+	cid, _ := dockerCmd(c, "run", "-d", "-v", prefix+"/foo", "--name=test", "busybox")
 
 	cli, err := client.NewEnvClient()
 	c.Assert(err, checker.IsNil)
 	defer cli.Close()
 
-	volumes, err := cli.VolumeList(context.Background(), filters.Args{})
+	container, err := cli.ContainerInspect(context.Background(), strings.TrimSpace(cid))
 	c.Assert(err, checker.IsNil)
+	vname := container.Mounts[0].Name
 
-	v := volumes.Volumes[0]
-	err = cli.VolumeRemove(context.Background(), v.Name, false)
+	err = cli.VolumeRemove(context.Background(), vname, false)
 	c.Assert(err.Error(), checker.Contains, "volume is in use")
 
 	dockerCmd(c, "rm", "-f", "test")
-	err = cli.VolumeRemove(context.Background(), v.Name, false)
+	err = cli.VolumeRemove(context.Background(), vname, false)
 	c.Assert(err, checker.IsNil)
 }
 
@@ -78,10 +89,6 @@
 	_, err = cli.VolumeCreate(context.Background(), config)
 	c.Assert(err, check.IsNil)
 
-	volumes, err := cli.VolumeList(context.Background(), filters.Args{})
-	c.Assert(err, checker.IsNil)
-	c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes))
-
 	vol, err := cli.VolumeInspect(context.Background(), config.Name)
 	c.Assert(err, checker.IsNil)
 	c.Assert(vol.Name, checker.Equals, config.Name)
diff --git a/integration-cli/docker_cli_attach_test.go b/integration-cli/docker_cli_attach_test.go
index db43beb..353cb65 100644
--- a/integration-cli/docker_cli_attach_test.go
+++ b/integration-cli/docker_cli_attach_test.go
@@ -147,7 +147,10 @@
 	c.Assert(err, check.IsNil)
 	defer stdout.Close()
 	c.Assert(cmd.Start(), check.IsNil)
-	defer cmd.Process.Kill()
+	defer func() {
+		cmd.Process.Kill()
+		cmd.Wait()
+	}()
 
 	_, err = stdin.Write([]byte("hello\n"))
 	c.Assert(err, check.IsNil)
diff --git a/integration-cli/docker_cli_attach_unix_test.go b/integration-cli/docker_cli_attach_unix_test.go
index 78f55e0..e40d7cf 100644
--- a/integration-cli/docker_cli_attach_unix_test.go
+++ b/integration-cli/docker_cli_attach_unix_test.go
@@ -173,7 +173,7 @@
 	c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running"))
 
 	go func() {
-		dockerCmd(c, "kill", id)
+		dockerCmdWithResult("kill", id)
 	}()
 
 	select {
@@ -225,7 +225,7 @@
 	c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running"))
 
 	go func() {
-		dockerCmd(c, "kill", id)
+		dockerCmdWithResult("kill", id)
 	}()
 
 	select {
diff --git a/integration-cli/docker_cli_authz_plugin_v2_test.go b/integration-cli/docker_cli_authz_plugin_v2_test.go
deleted file mode 100644
index b8bbcc7..0000000
--- a/integration-cli/docker_cli_authz_plugin_v2_test.go
+++ /dev/null
@@ -1,168 +0,0 @@
-// +build !windows
-
-package main
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/docker/docker/integration-cli/checker"
-	"github.com/docker/docker/integration-cli/daemon"
-	"github.com/go-check/check"
-)
-
-var (
-	authzPluginName            = "riyaz/authz-no-volume-plugin"
-	authzPluginTag             = "latest"
-	authzPluginNameWithTag     = authzPluginName + ":" + authzPluginTag
-	authzPluginBadManifestName = "riyaz/authz-plugin-bad-manifest"
-	nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin"
-)
-
-func init() {
-	check.Suite(&DockerAuthzV2Suite{
-		ds: &DockerSuite{},
-	})
-}
-
-type DockerAuthzV2Suite struct {
-	ds *DockerSuite
-	d  *daemon.Daemon
-}
-
-func (s *DockerAuthzV2Suite) SetUpTest(c *check.C) {
-	testRequires(c, DaemonIsLinux, Network)
-	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
-		Experimental: testEnv.ExperimentalDaemon(),
-	})
-	s.d.Start(c)
-}
-
-func (s *DockerAuthzV2Suite) TearDownTest(c *check.C) {
-	if s.d != nil {
-		s.d.Stop(c)
-		s.ds.TearDownTest(c)
-	}
-}
-
-func (s *DockerAuthzV2Suite) TestAuthZPluginAllowNonVolumeRequest(c *check.C) {
-	testRequires(c, DaemonIsLinux, IsAmd64, Network)
-	// Install authz plugin
-	_, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag)
-	c.Assert(err, checker.IsNil)
-	// start the daemon with the plugin and load busybox, --net=none build fails otherwise
-	// because it needs to pull busybox
-	s.d.Restart(c, "--authorization-plugin="+authzPluginNameWithTag)
-	s.d.LoadBusybox(c)
-
-	// defer disabling the plugin
-	defer func() {
-		s.d.Restart(c)
-		_, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag)
-		c.Assert(err, checker.IsNil)
-		_, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag)
-		c.Assert(err, checker.IsNil)
-	}()
-
-	// Ensure docker run command and accompanying docker ps are successful
-	out, err := s.d.Cmd("run", "-d", "busybox", "top")
-	c.Assert(err, check.IsNil)
-
-	id := strings.TrimSpace(out)
-
-	out, err = s.d.Cmd("ps")
-	c.Assert(err, check.IsNil)
-	c.Assert(assertContainerList(out, []string{id}), check.Equals, true)
-}
-
-func (s *DockerAuthzV2Suite) TestAuthZPluginDisable(c *check.C) {
-	testRequires(c, DaemonIsLinux, IsAmd64, Network)
-	// Install authz plugin
-	_, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag)
-	c.Assert(err, checker.IsNil)
-	// start the daemon with the plugin and load busybox, --net=none build fails otherwise
-	// because it needs to pull busybox
-	s.d.Restart(c, "--authorization-plugin="+authzPluginNameWithTag)
-	s.d.LoadBusybox(c)
-
-	// defer removing the plugin
-	defer func() {
-		s.d.Restart(c)
-		_, err = s.d.Cmd("plugin", "rm", "-f", authzPluginNameWithTag)
-		c.Assert(err, checker.IsNil)
-	}()
-
-	out, err := s.d.Cmd("volume", "create")
-	c.Assert(err, check.NotNil)
-	c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))
-
-	// disable the plugin
-	_, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag)
-	c.Assert(err, checker.IsNil)
-
-	// now test to see if the docker api works.
-	_, err = s.d.Cmd("volume", "create")
-	c.Assert(err, checker.IsNil)
-}
-
-func (s *DockerAuthzV2Suite) TestAuthZPluginRejectVolumeRequests(c *check.C) {
-	testRequires(c, DaemonIsLinux, IsAmd64, Network)
-	// Install authz plugin
-	_, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginNameWithTag)
-	c.Assert(err, checker.IsNil)
-
-	// restart the daemon with the plugin
-	s.d.Restart(c, "--authorization-plugin="+authzPluginNameWithTag)
-
-	// defer disabling the plugin
-	defer func() {
-		s.d.Restart(c)
-		_, err = s.d.Cmd("plugin", "disable", authzPluginNameWithTag)
-		c.Assert(err, checker.IsNil)
-		_, err = s.d.Cmd("plugin", "rm", authzPluginNameWithTag)
-		c.Assert(err, checker.IsNil)
-	}()
-
-	out, err := s.d.Cmd("volume", "create")
-	c.Assert(err, check.NotNil)
-	c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))
-
-	out, err = s.d.Cmd("volume", "ls")
-	c.Assert(err, check.NotNil)
-	c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))
-
-	// The plugin will block the command before it can determine the volume does not exist
-	out, err = s.d.Cmd("volume", "rm", "test")
-	c.Assert(err, check.NotNil)
-	c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))
-
-	out, err = s.d.Cmd("volume", "inspect", "test")
-	c.Assert(err, check.NotNil)
-	c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))
-
-	out, err = s.d.Cmd("volume", "prune", "-f")
-	c.Assert(err, check.NotNil)
-	c.Assert(out, checker.Contains, fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag))
-}
-
-func (s *DockerAuthzV2Suite) TestAuthZPluginBadManifestFailsDaemonStart(c *check.C) {
-	testRequires(c, DaemonIsLinux, IsAmd64, Network)
-	// Install authz plugin with bad manifest
-	_, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", authzPluginBadManifestName)
-	c.Assert(err, checker.IsNil)
-
-	// start the daemon with the plugin, it will error
-	c.Assert(s.d.RestartWithError("--authorization-plugin="+authzPluginBadManifestName), check.NotNil)
-
-	// restarting the daemon without requiring the plugin will succeed
-	s.d.Restart(c)
-}
-
-func (s *DockerAuthzV2Suite) TestNonexistentAuthZPluginFailsDaemonStart(c *check.C) {
-	testRequires(c, DaemonIsLinux, Network)
-	// start the daemon with a non-existent authz plugin, it will error
-	c.Assert(s.d.RestartWithError("--authorization-plugin="+nonexistentAuthzPluginName), check.NotNil)
-
-	// restarting the daemon without requiring the plugin will succeed
-	s.d.Start(c)
-}
diff --git a/integration-cli/docker_cli_authz_unix_test.go b/integration-cli/docker_cli_authz_unix_test.go
deleted file mode 100644
index f46ab80..0000000
--- a/integration-cli/docker_cli_authz_unix_test.go
+++ /dev/null
@@ -1,475 +0,0 @@
-// +build !windows
-
-package main
-
-import (
-	"encoding/json"
-	"fmt"
-	"io/ioutil"
-	"net/http"
-	"net/http/httptest"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"bufio"
-	"bytes"
-	"os/exec"
-	"strconv"
-	"time"
-
-	"net"
-	"net/http/httputil"
-	"net/url"
-
-	"github.com/docker/docker/integration-cli/checker"
-	"github.com/docker/docker/integration-cli/daemon"
-	"github.com/docker/docker/pkg/authorization"
-	"github.com/docker/docker/pkg/plugins"
-	"github.com/go-check/check"
-)
-
-const (
-	testAuthZPlugin     = "authzplugin"
-	unauthorizedMessage = "User unauthorized authz plugin"
-	errorMessage        = "something went wrong..."
-	containerListAPI    = "/containers/json"
-)
-
-var (
-	alwaysAllowed = []string{"/_ping", "/info"}
-)
-
-func init() {
-	check.Suite(&DockerAuthzSuite{
-		ds: &DockerSuite{},
-	})
-}
-
-type DockerAuthzSuite struct {
-	server *httptest.Server
-	ds     *DockerSuite
-	d      *daemon.Daemon
-	ctrl   *authorizationController
-}
-
-type authorizationController struct {
-	reqRes        authorization.Response // reqRes holds the plugin response to the initial client request
-	resRes        authorization.Response // resRes holds the plugin response to the daemon response
-	psRequestCnt  int                    // psRequestCnt counts the number of calls to list container request api
-	psResponseCnt int                    // psResponseCnt counts the number of calls to list containers response API
-	requestsURIs  []string               // requestsURIs stores all request URIs that are sent to the authorization controller
-	reqUser       string
-	resUser       string
-}
-
-func (s *DockerAuthzSuite) SetUpTest(c *check.C) {
-	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
-		Experimental: testEnv.ExperimentalDaemon(),
-	})
-	s.ctrl = &authorizationController{}
-}
-
-func (s *DockerAuthzSuite) TearDownTest(c *check.C) {
-	if s.d != nil {
-		s.d.Stop(c)
-		s.ds.TearDownTest(c)
-		s.ctrl = nil
-	}
-}
-
-func (s *DockerAuthzSuite) SetUpSuite(c *check.C) {
-	mux := http.NewServeMux()
-	s.server = httptest.NewServer(mux)
-
-	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
-		b, err := json.Marshal(plugins.Manifest{Implements: []string{authorization.AuthZApiImplements}})
-		c.Assert(err, check.IsNil)
-		w.Write(b)
-	})
-
-	mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) {
-		defer r.Body.Close()
-		body, err := ioutil.ReadAll(r.Body)
-		c.Assert(err, check.IsNil)
-		authReq := authorization.Request{}
-		err = json.Unmarshal(body, &authReq)
-		c.Assert(err, check.IsNil)
-
-		assertBody(c, authReq.RequestURI, authReq.RequestHeaders, authReq.RequestBody)
-		assertAuthHeaders(c, authReq.RequestHeaders)
-
-		// Count only container list api
-		if strings.HasSuffix(authReq.RequestURI, containerListAPI) {
-			s.ctrl.psRequestCnt++
-		}
-
-		s.ctrl.requestsURIs = append(s.ctrl.requestsURIs, authReq.RequestURI)
-
-		reqRes := s.ctrl.reqRes
-		if isAllowed(authReq.RequestURI) {
-			reqRes = authorization.Response{Allow: true}
-		}
-		if reqRes.Err != "" {
-			w.WriteHeader(http.StatusInternalServerError)
-		}
-		b, err := json.Marshal(reqRes)
-		c.Assert(err, check.IsNil)
-		s.ctrl.reqUser = authReq.User
-		w.Write(b)
-	})
-
-	mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) {
-		defer r.Body.Close()
-		body, err := ioutil.ReadAll(r.Body)
-		c.Assert(err, check.IsNil)
-		authReq := authorization.Request{}
-		err = json.Unmarshal(body, &authReq)
-		c.Assert(err, check.IsNil)
-
-		assertBody(c, authReq.RequestURI, authReq.ResponseHeaders, authReq.ResponseBody)
-		assertAuthHeaders(c, authReq.ResponseHeaders)
-
-		// Count only container list api
-		if strings.HasSuffix(authReq.RequestURI, containerListAPI) {
-			s.ctrl.psResponseCnt++
-		}
-		resRes := s.ctrl.resRes
-		if isAllowed(authReq.RequestURI) {
-			resRes = authorization.Response{Allow: true}
-		}
-		if resRes.Err != "" {
-			w.WriteHeader(http.StatusInternalServerError)
-		}
-		b, err := json.Marshal(resRes)
-		c.Assert(err, check.IsNil)
-		s.ctrl.resUser = authReq.User
-		w.Write(b)
-	})
-
-	err := os.MkdirAll("/etc/docker/plugins", 0755)
-	c.Assert(err, checker.IsNil)
-
-	fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", testAuthZPlugin)
-	err = ioutil.WriteFile(fileName, []byte(s.server.URL), 0644)
-	c.Assert(err, checker.IsNil)
-}
-
-// check for always allowed endpoints to not inhibit test framework functions
-func isAllowed(reqURI string) bool {
-	for _, endpoint := range alwaysAllowed {
-		if strings.HasSuffix(reqURI, endpoint) {
-			return true
-		}
-	}
-	return false
-}
-
-// assertAuthHeaders validates authentication headers are removed
-func assertAuthHeaders(c *check.C, headers map[string]string) error {
-	for k := range headers {
-		if strings.Contains(strings.ToLower(k), "auth") || strings.Contains(strings.ToLower(k), "x-registry") {
-			c.Errorf("Found authentication headers in request '%v'", headers)
-		}
-	}
-	return nil
-}
-
-// assertBody asserts that body is removed for non text/json requests
-func assertBody(c *check.C, requestURI string, headers map[string]string, body []byte) {
-	if strings.Contains(strings.ToLower(requestURI), "auth") && len(body) > 0 {
-		//return fmt.Errorf("Body included for authentication endpoint %s", string(body))
-		c.Errorf("Body included for authentication endpoint %s", string(body))
-	}
-
-	for k, v := range headers {
-		if strings.EqualFold(k, "Content-Type") && strings.HasPrefix(v, "text/") || v == "application/json" {
-			return
-		}
-	}
-	if len(body) > 0 {
-		c.Errorf("Body included while it should not (Headers: '%v')", headers)
-	}
-}
-
-func (s *DockerAuthzSuite) TearDownSuite(c *check.C) {
-	if s.server == nil {
-		return
-	}
-
-	s.server.Close()
-
-	err := os.RemoveAll("/etc/docker/plugins")
-	c.Assert(err, checker.IsNil)
-}
-
-func (s *DockerAuthzSuite) TestAuthZPluginAllowRequest(c *check.C) {
-	// start the daemon and load busybox, --net=none build fails otherwise
-	// cause it needs to pull busybox
-	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
-	s.ctrl.reqRes.Allow = true
-	s.ctrl.resRes.Allow = true
-	s.d.LoadBusybox(c)
-
-	// Ensure command successful
-	out, err := s.d.Cmd("run", "-d", "busybox", "top")
-	c.Assert(err, check.IsNil)
-
-	id := strings.TrimSpace(out)
-	assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create")
-	assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", id))
-
-	out, err = s.d.Cmd("ps")
-	c.Assert(err, check.IsNil)
-	c.Assert(assertContainerList(out, []string{id}), check.Equals, true)
-	c.Assert(s.ctrl.psRequestCnt, check.Equals, 1)
-	c.Assert(s.ctrl.psResponseCnt, check.Equals, 1)
-}
-
-func (s *DockerAuthzSuite) TestAuthZPluginTls(c *check.C) {
-
-	const testDaemonHTTPSAddr = "tcp://localhost:4271"
-	// start the daemon and load busybox, --net=none build fails otherwise
-	// cause it needs to pull busybox
-	s.d.Start(c,
-		"--authorization-plugin="+testAuthZPlugin,
-		"--tlsverify",
-		"--tlscacert",
-		"fixtures/https/ca.pem",
-		"--tlscert",
-		"fixtures/https/server-cert.pem",
-		"--tlskey",
-		"fixtures/https/server-key.pem",
-		"-H", testDaemonHTTPSAddr)
-
-	s.ctrl.reqRes.Allow = true
-	s.ctrl.resRes.Allow = true
-
-	out, _ := dockerCmd(
-		c,
-		"--tlsverify",
-		"--tlscacert", "fixtures/https/ca.pem",
-		"--tlscert", "fixtures/https/client-cert.pem",
-		"--tlskey", "fixtures/https/client-key.pem",
-		"-H",
-		testDaemonHTTPSAddr,
-		"version",
-	)
-	if !strings.Contains(out, "Server") {
-		c.Fatalf("docker version should return information of server side")
-	}
-
-	c.Assert(s.ctrl.reqUser, check.Equals, "client")
-	c.Assert(s.ctrl.resUser, check.Equals, "client")
-}
-
-func (s *DockerAuthzSuite) TestAuthZPluginDenyRequest(c *check.C) {
-	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
-	s.ctrl.reqRes.Allow = false
-	s.ctrl.reqRes.Msg = unauthorizedMessage
-
-	// Ensure command is blocked
-	res, err := s.d.Cmd("ps")
-	c.Assert(err, check.NotNil)
-	c.Assert(s.ctrl.psRequestCnt, check.Equals, 1)
-	c.Assert(s.ctrl.psResponseCnt, check.Equals, 0)
-
-	// Ensure unauthorized message appears in response
-	c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage))
-}
-
-// TestAuthZPluginAPIDenyResponse validates that when authorization plugin deny the request, the status code is forbidden
-func (s *DockerAuthzSuite) TestAuthZPluginAPIDenyResponse(c *check.C) {
-	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
-	s.ctrl.reqRes.Allow = false
-	s.ctrl.resRes.Msg = unauthorizedMessage
-
-	daemonURL, err := url.Parse(s.d.Sock())
-
-	conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10)
-	c.Assert(err, check.IsNil)
-	client := httputil.NewClientConn(conn, nil)
-	req, err := http.NewRequest("GET", "/version", nil)
-	c.Assert(err, check.IsNil)
-	resp, err := client.Do(req)
-
-	c.Assert(err, check.IsNil)
-	c.Assert(resp.StatusCode, checker.Equals, http.StatusForbidden)
-	c.Assert(err, checker.IsNil)
-}
-
-func (s *DockerAuthzSuite) TestAuthZPluginDenyResponse(c *check.C) {
-	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
-	s.ctrl.reqRes.Allow = true
-	s.ctrl.resRes.Allow = false
-	s.ctrl.resRes.Msg = unauthorizedMessage
-
-	// Ensure command is blocked
-	res, err := s.d.Cmd("ps")
-	c.Assert(err, check.NotNil)
-	c.Assert(s.ctrl.psRequestCnt, check.Equals, 1)
-	c.Assert(s.ctrl.psResponseCnt, check.Equals, 1)
-
-	// Ensure unauthorized message appears in response
-	c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage))
-}
-
-// TestAuthZPluginAllowEventStream verifies event stream propagates correctly after request pass through by the authorization plugin
-func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-
-	// start the daemon and load busybox to avoid pulling busybox from Docker Hub
-	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
-	s.ctrl.reqRes.Allow = true
-	s.ctrl.resRes.Allow = true
-	s.d.LoadBusybox(c)
-
-	startTime := strconv.FormatInt(daemonTime(c).Unix(), 10)
-	// Add another command to to enable event pipelining
-	eventsCmd := exec.Command(dockerBinary, "--host", s.d.Sock(), "events", "--since", startTime)
-	stdout, err := eventsCmd.StdoutPipe()
-	if err != nil {
-		c.Assert(err, check.IsNil)
-	}
-
-	observer := eventObserver{
-		buffer:    new(bytes.Buffer),
-		command:   eventsCmd,
-		scanner:   bufio.NewScanner(stdout),
-		startTime: startTime,
-	}
-
-	err = observer.Start()
-	c.Assert(err, checker.IsNil)
-	defer observer.Stop()
-
-	// Create a container and wait for the creation events
-	out, err := s.d.Cmd("run", "-d", "busybox", "top")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-	containerID := strings.TrimSpace(out)
-	c.Assert(s.d.WaitRun(containerID), checker.IsNil)
-
-	events := map[string]chan bool{
-		"create": make(chan bool, 1),
-		"start":  make(chan bool, 1),
-	}
-
-	matcher := matchEventLine(containerID, "container", events)
-	processor := processEventMatch(events)
-	go observer.Match(matcher, processor)
-
-	// Ensure all events are received
-	for event, eventChannel := range events {
-
-		select {
-		case <-time.After(30 * time.Second):
-			// Fail the test
-			observer.CheckEventError(c, containerID, event, matcher)
-			c.FailNow()
-		case <-eventChannel:
-			// Ignore, event received
-		}
-	}
-
-	// Ensure both events and container endpoints are passed to the authorization plugin
-	assertURIRecorded(c, s.ctrl.requestsURIs, "/events")
-	assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create")
-	assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", containerID))
-}
-
-func (s *DockerAuthzSuite) TestAuthZPluginErrorResponse(c *check.C) {
-	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
-	s.ctrl.reqRes.Allow = true
-	s.ctrl.resRes.Err = errorMessage
-
-	// Ensure command is blocked
-	res, err := s.d.Cmd("ps")
-	c.Assert(err, check.NotNil)
-
-	c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage))
-}
-
-func (s *DockerAuthzSuite) TestAuthZPluginErrorRequest(c *check.C) {
-	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin)
-	s.ctrl.reqRes.Err = errorMessage
-
-	// Ensure command is blocked
-	res, err := s.d.Cmd("ps")
-	c.Assert(err, check.NotNil)
-
-	c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage))
-}
-
-func (s *DockerAuthzSuite) TestAuthZPluginEnsureNoDuplicatePluginRegistration(c *check.C) {
-	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin)
-
-	s.ctrl.reqRes.Allow = true
-	s.ctrl.resRes.Allow = true
-
-	out, err := s.d.Cmd("ps")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-
-	// assert plugin is only called once..
-	c.Assert(s.ctrl.psRequestCnt, check.Equals, 1)
-	c.Assert(s.ctrl.psResponseCnt, check.Equals, 1)
-}
-
-func (s *DockerAuthzSuite) TestAuthZPluginEnsureLoadImportWorking(c *check.C) {
-	s.d.Start(c, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin)
-	s.ctrl.reqRes.Allow = true
-	s.ctrl.resRes.Allow = true
-	s.d.LoadBusybox(c)
-
-	tmp, err := ioutil.TempDir("", "test-authz-load-import")
-	c.Assert(err, check.IsNil)
-	defer os.RemoveAll(tmp)
-
-	savedImagePath := filepath.Join(tmp, "save.tar")
-
-	out, err := s.d.Cmd("save", "-o", savedImagePath, "busybox")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-	out, err = s.d.Cmd("load", "--input", savedImagePath)
-	c.Assert(err, check.IsNil, check.Commentf(out))
-
-	exportedImagePath := filepath.Join(tmp, "export.tar")
-
-	out, err = s.d.Cmd("run", "-d", "--name", "testexport", "busybox")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-	out, err = s.d.Cmd("export", "-o", exportedImagePath, "testexport")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-	out, err = s.d.Cmd("import", exportedImagePath)
-	c.Assert(err, check.IsNil, check.Commentf(out))
-}
-
-func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) {
-	s.d.Start(c, "--debug", "--authorization-plugin="+testAuthZPlugin)
-	s.ctrl.reqRes.Allow = true
-	s.ctrl.resRes.Allow = true
-	s.d.LoadBusybox(c)
-
-	daemonURL, err := url.Parse(s.d.Sock())
-
-	conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10)
-	c.Assert(err, check.IsNil)
-	client := httputil.NewClientConn(conn, nil)
-	req, err := http.NewRequest("GET", "/version", nil)
-	c.Assert(err, check.IsNil)
-	resp, err := client.Do(req)
-
-	c.Assert(err, check.IsNil)
-	c.Assert(resp.Header["Content-Type"][0], checker.Equals, "application/json")
-}
-
-// assertURIRecorded verifies that the given URI was sent and recorded in the authz plugin
-func assertURIRecorded(c *check.C, uris []string, uri string) {
-	var found bool
-	for _, u := range uris {
-		if strings.Contains(u, uri) {
-			found = true
-			break
-		}
-	}
-	if !found {
-		c.Fatalf("Expected to find URI '%s', recorded uris '%s'", uri, strings.Join(uris, ","))
-	}
-}
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index 59213e5..89e62c1 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -23,8 +23,8 @@
 	"github.com/docker/docker/integration-cli/cli/build/fakecontext"
 	"github.com/docker/docker/integration-cli/cli/build/fakegit"
 	"github.com/docker/docker/integration-cli/cli/build/fakestorage"
+	"github.com/docker/docker/internal/testutil"
 	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/stringutils"
 	"github.com/go-check/check"
 	"github.com/gotestyourself/gotestyourself/icmd"
 	digest "github.com/opencontainers/go-digest"
@@ -1173,12 +1173,13 @@
 	containerCountBefore := getContainerCount(c)
 	name := "testbuildforcerm"
 
-	buildImage(name, cli.WithFlags("--force-rm"), build.WithBuildContext(c,
-		build.WithFile("Dockerfile", `FROM `+minimalBaseImage()+`
+	r := buildImage(name, cli.WithFlags("--force-rm"), build.WithBuildContext(c,
+		build.WithFile("Dockerfile", `FROM busybox
 	RUN true
-	RUN thiswillfail`))).Assert(c, icmd.Expected{
-		ExitCode: 1,
-	})
+	RUN thiswillfail`)))
+	if r.ExitCode != 1 && r.ExitCode != 127 { // different on Linux / Windows
+		c.Fatalf("Wrong exit code")
+	}
 
 	containerCountAfter := getContainerCount(c)
 	if containerCountBefore != containerCountAfter {
@@ -3184,7 +3185,7 @@
 
 // FIXME(vdemeester) should be a unit test
 func (s *DockerSuite) TestBuildInvalidTag(c *check.C) {
-	name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200)
+	name := "abcd:" + testutil.GenerateRandomAlphaOnlyString(200)
 	buildImage(name, build.WithDockerfile("FROM "+minimalBaseImage()+"\nMAINTAINER quux\n")).Assert(c, icmd.Expected{
 		ExitCode: 125,
 		Err:      "invalid reference format",
@@ -4542,7 +4543,6 @@
 }
 
 func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) {
-	testRequires(c, DaemonIsLinux) // Windows does not support ARG
 	imgName := "bldvarstest"
 
 	wdVar := "WDIR"
@@ -4559,6 +4559,10 @@
 	userVal := "testUser"
 	volVar := "VOL"
 	volVal := "/testVol/"
+	if DaemonIsWindows() {
+		volVal = "C:\\testVol"
+		wdVal = "C:\\tmp"
+	}
 
 	buildImageSuccessfully(c, imgName,
 		cli.WithFlags(
@@ -4594,7 +4598,7 @@
 	)
 
 	res := inspectField(c, imgName, "Config.WorkingDir")
-	c.Check(res, check.Equals, filepath.ToSlash(wdVal))
+	c.Check(filepath.ToSlash(res), check.Equals, filepath.ToSlash(wdVal))
 
 	var resArr []string
 	inspectFieldAndUnmarshall(c, imgName, "Config.Env", &resArr)
diff --git a/integration-cli/docker_cli_build_unix_test.go b/integration-cli/docker_cli_build_unix_test.go
index dbcf00b..d857bd2 100644
--- a/integration-cli/docker_cli_build_unix_test.go
+++ b/integration-cli/docker_cli_build_unix_test.go
@@ -27,17 +27,18 @@
 func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) {
 	testRequires(c, cpuCfsQuota)
 	name := "testbuildresourceconstraints"
+	buildLabel := "DockerSuite.TestBuildResourceConstraintsAreUsed"
 
 	ctx := fakecontext.New(c, "", fakecontext.WithDockerfile(`
 	FROM hello-world:frozen
 	RUN ["/hello"]
 	`))
 	cli.Docker(
-		cli.Args("build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "-t", name, "."),
+		cli.Args("build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "--label="+buildLabel, "-t", name, "."),
 		cli.InDir(ctx.Dir),
 	).Assert(c, icmd.Success)
 
-	out := cli.DockerCmd(c, "ps", "-lq").Combined()
+	out := cli.DockerCmd(c, "ps", "-lq", "--filter", "label="+buildLabel).Combined()
 	cID := strings.TrimSpace(out)
 
 	type hostConfig struct {
@@ -148,6 +149,11 @@
 	if err := buildCmd.Start(); err != nil {
 		c.Fatalf("failed to run build: %s", err)
 	}
+	// always clean up
+	defer func() {
+		buildCmd.Process.Kill()
+		buildCmd.Wait()
+	}()
 
 	matchCID := regexp.MustCompile("Running in (.+)")
 	scanner := bufio.NewScanner(stdoutBuild)
diff --git a/integration-cli/docker_cli_by_digest_test.go b/integration-cli/docker_cli_by_digest_test.go
index c7115c8..0c68271 100644
--- a/integration-cli/docker_cli_by_digest_test.go
+++ b/integration-cli/docker_cli_by_digest_test.go
@@ -407,6 +407,8 @@
 }
 
 func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c *check.C) {
+	existingContainers := ExistingContainerIDs(c)
+
 	digest, err := setupImage(c)
 	c.Assert(err, checker.IsNil, check.Commentf("error setting up image"))
 
@@ -438,7 +440,7 @@
 
 	// Valid imageReference
 	out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageReference)
-	checkPsAncestorFilterOutput(c, out, imageReference, expectedIDs)
+	checkPsAncestorFilterOutput(c, RemoveOutputForExistingElements(out, existingContainers), imageReference, expectedIDs)
 }
 
 func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) {
diff --git a/integration-cli/docker_cli_config_ls_test.go b/integration-cli/docker_cli_config_ls_test.go
index 5c07012..5f002bc 100644
--- a/integration-cli/docker_cli_config_ls_test.go
+++ b/integration-cli/docker_cli_config_ls_test.go
@@ -11,6 +11,7 @@
 )
 
 func (s *DockerSwarmSuite) TestConfigList(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	d := s.AddDaemon(c, true, true)
 
 	testName0 := "test0"
diff --git a/integration-cli/docker_cli_cp_from_container_test.go b/integration-cli/docker_cli_cp_from_container_test.go
index 687aec8..0a282f5 100644
--- a/integration-cli/docker_cli_cp_from_container_test.go
+++ b/integration-cli/docker_cli_cp_from_container_test.go
@@ -50,33 +50,6 @@
 	c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err))
 }
 
-// Test for error when SRC is a valid file or directory,
-// bu the DST parent directory does not exist.
-func (s *DockerSuite) TestCpFromErrDstParentNotExists(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-	containerID := makeTestContainer(c, testContainerOptions{addContent: true})
-
-	tmpDir := getTestDir(c, "test-cp-from-err-dst-parent-not-exists")
-	defer os.RemoveAll(tmpDir)
-
-	makeTestContentInDir(c, tmpDir)
-
-	// Try with a file source.
-	srcPath := containerCpPath(containerID, "/file1")
-	dstPath := cpPath(tmpDir, "notExists", "file1")
-	_, dstStatErr := os.Lstat(filepath.Dir(dstPath))
-	c.Assert(os.IsNotExist(dstStatErr), checker.True)
-
-	err := runDockerCp(c, srcPath, dstPath, nil)
-	c.Assert(err.Error(), checker.Contains, dstStatErr.Error())
-
-	// Try with a directory source.
-	srcPath = containerCpPath(containerID, "/dir1")
-
-	err = runDockerCp(c, srcPath, dstPath, nil)
-	c.Assert(err.Error(), checker.Contains, dstStatErr.Error())
-}
-
 // Test for error when DST ends in a trailing
 // path separator but exists as a file.
 func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) {
diff --git a/integration-cli/docker_cli_cp_utils_test.go b/integration-cli/docker_cli_cp_utils_test.go
index e517fc0..402a87e 100644
--- a/integration-cli/docker_cli_cp_utils_test.go
+++ b/integration-cli/docker_cli_cp_utils_test.go
@@ -193,9 +193,7 @@
 
 	args := []string{"cp"}
 
-	for _, param := range params {
-		args = append(args, param)
-	}
+	args = append(args, params...)
 
 	args = append(args, src, dst)
 
diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go
index 7e954b9..6865b92 100644
--- a/integration-cli/docker_cli_daemon_test.go
+++ b/integration-cli/docker_cli_daemon_test.go
@@ -28,6 +28,7 @@
 	"github.com/docker/docker/api"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
+	moby_daemon "github.com/docker/docker/daemon"
 	"github.com/docker/docker/integration-cli/checker"
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/daemon"
@@ -48,6 +49,7 @@
 func (s *DockerDaemonSuite) TestLegacyDaemonCommand(c *check.C) {
 	cmd := exec.Command(dockerBinary, "daemon", "--storage-driver=vfs", "--debug")
 	err := cmd.Start()
+	go cmd.Wait()
 	c.Assert(err, checker.IsNil, check.Commentf("could not start daemon using 'docker daemon'"))
 
 	c.Assert(cmd.Process.Kill(), checker.IsNil)
@@ -1448,7 +1450,8 @@
 	c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment)
 
 	// kill the container
-	icmd.RunCommand(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", id).Assert(c, icmd.Success)
+	icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
+		"--namespace", moby_daemon.MainNamespace, "tasks", "kill", id).Assert(c, icmd.Success)
 
 	// restart daemon.
 	d.Restart(c)
@@ -1825,7 +1828,7 @@
 	defer s.d.Stop(c)
 
 	// pull a repository large enough to fill the mount point
-	pullOut, err := s.d.Cmd("pull", "registry:2")
+	pullOut, err := s.d.Cmd("pull", "debian:stretch")
 	c.Assert(err, checker.NotNil, check.Commentf(pullOut))
 	c.Assert(pullOut, checker.Contains, "no space left on device")
 }
@@ -1987,7 +1990,6 @@
 
 // TestDaemonRestartWithKilledRunningContainer requires live restore of running containers
 func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check.C) {
-	// TODO(mlaventure): Not sure what would the exit code be on windows
 	testRequires(t, DaemonIsLinux)
 	s.d.StartWithBusybox(t)
 
@@ -2008,7 +2010,8 @@
 	}
 
 	// kill the container
-	icmd.RunCommand(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", cid).Assert(t, icmd.Success)
+	icmd.RunCommand(ctrBinary, "--address", "/var/run/docker/containerd/docker-containerd.sock",
+		"--namespace", moby_daemon.MainNamespace, "tasks", "kill", cid).Assert(t, icmd.Success)
 
 	// Give time to containerd to process the command if we don't
 	// the exit event might be received after we do the inspect
@@ -2076,7 +2079,6 @@
 
 // TestDaemonRestartWithUnpausedRunningContainer requires live restore of running containers.
 func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *check.C) {
-	// TODO(mlaventure): Not sure what would the exit code be on windows
 	testRequires(t, DaemonIsLinux)
 	s.d.StartWithBusybox(t, "--live-restore")
 
@@ -2103,8 +2105,9 @@
 	// resume the container
 	result := icmd.RunCommand(
 		ctrBinary,
-		"--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock",
-		"containers", "resume", cid)
+		"--address", "/var/run/docker/containerd/docker-containerd.sock",
+		"--namespace", moby_daemon.MainNamespace,
+		"tasks", "resume", cid)
 	result.Assert(t, icmd.Success)
 
 	// Give time to containerd to process the command if we don't
@@ -2159,9 +2162,9 @@
 }
 
 func (s *DockerDaemonSuite) TestDaemonStartWithoutColors(c *check.C) {
-	testRequires(c, DaemonIsLinux, NotPpc64le)
+	testRequires(c, DaemonIsLinux)
 
-	infoLog := "\x1b[34mINFO\x1b"
+	infoLog := "\x1b[36mINFO\x1b"
 
 	b := bytes.NewBuffer(nil)
 	done := make(chan bool)
@@ -2209,7 +2212,7 @@
 }
 
 func (s *DockerDaemonSuite) TestDaemonDebugLog(c *check.C) {
-	testRequires(c, DaemonIsLinux, NotPpc64le)
+	testRequires(c, DaemonIsLinux)
 
 	debugLog := "\x1b[37mDEBU\x1b"
 
@@ -2786,7 +2789,7 @@
 	case <-time.After(5 * time.Second):
 	}
 
-	expectedMessage := `level=debug msg="start clean shutdown of all containers with a 3 seconds timeout..."`
+	expectedMessage := `level=debug msg="daemon configured with a 3 seconds minimum shutdown timeout"`
 	content, err := s.d.ReadLogFile()
 	c.Assert(err, checker.IsNil)
 	c.Assert(string(content), checker.Contains, expectedMessage)
diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go
index b36f0be..dff54a4 100644
--- a/integration-cli/docker_cli_events_test.go
+++ b/integration-cli/docker_cli_events_test.go
@@ -36,7 +36,7 @@
 	// List of available time formats to --since
 	unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) }
 	rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) }
-	duration := func(t time.Time) string { return time.Now().Sub(t).String() }
+	duration := func(t time.Time) string { return time.Since(t).String() }
 
 	// --since=$start must contain only the 'untag' event
 	for _, f := range []func(time.Time) string{unixTs, rfc3339, duration} {
@@ -86,6 +86,7 @@
 	// timeouts creating so many containers simultaneously. This is a due to
 	// a bug in the Windows platform. It will be fixed in a Windows Update.
 	numContainers := 17
+	eventPerContainer := 7 // create, attach, network connect, start, die, network disconnect, destroy
 	numConcurrentContainers := numContainers
 	if testEnv.DaemonPlatform() == "windows" {
 		numConcurrentContainers = 4
@@ -93,17 +94,19 @@
 	sem := make(chan bool, numConcurrentContainers)
 	errChan := make(chan error, numContainers)
 
+	startTime := daemonUnixTime(c)
+
 	args := []string{"run", "--rm", "busybox", "true"}
 	for i := 0; i < numContainers; i++ {
 		sem <- true
-		go func() {
+		go func(i int) {
 			defer func() { <-sem }()
 			out, err := exec.Command(dockerBinary, args...).CombinedOutput()
 			if err != nil {
 				err = fmt.Errorf("%v: %s", err, string(out))
 			}
 			errChan <- err
-		}()
+		}(i)
 	}
 
 	// Wait for all goroutines to finish
@@ -116,10 +119,10 @@
 		c.Assert(err, checker.IsNil, check.Commentf("%q failed with error", strings.Join(args, " ")))
 	}
 
-	out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c))
+	out, _ := dockerCmd(c, "events", "--since="+startTime, "--until", daemonUnixTime(c))
 	events := strings.Split(out, "\n")
 	nEvents := len(events) - 1
-	c.Assert(nEvents, checker.Equals, 256, check.Commentf("events should be limited to 256, but received %d", nEvents))
+	c.Assert(nEvents, checker.Equals, numContainers*eventPerContainer, check.Commentf("events should be limited to 256, but received %d", nEvents))
 }
 
 func (s *DockerSuite) TestEventsContainerEvents(c *check.C) {
@@ -533,7 +536,10 @@
 	c.Assert(err, checker.IsNil)
 	defer stdout.Close()
 	c.Assert(cmd.Start(), checker.IsNil)
-	defer cmd.Process.Kill()
+	defer func() {
+		cmd.Process.Kill()
+		cmd.Wait()
+	}()
 
 	// Make sure we're done attaching by writing/reading some stuff
 	_, err = stdin.Write([]byte("hello\n"))
diff --git a/integration-cli/docker_cli_events_unix_test.go b/integration-cli/docker_cli_events_unix_test.go
index a2d6706..afac998 100644
--- a/integration-cli/docker_cli_events_unix_test.go
+++ b/integration-cli/docker_cli_events_unix_test.go
@@ -97,7 +97,7 @@
 	}()
 
 	c.Assert(waitRun("oomTrue"), checker.IsNil)
-	defer dockerCmd(c, "kill", "oomTrue")
+	defer dockerCmdWithResult("kill", "oomTrue")
 	containerID := inspectField(c, "oomTrue", "Id")
 
 	testActions := map[string]chan bool{
diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go
index 4442ca2..e915e1e 100644
--- a/integration-cli/docker_cli_exec_test.go
+++ b/integration-cli/docker_cli_exec_test.go
@@ -229,18 +229,18 @@
 	testRequires(c, DaemonIsLinux)
 	dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top")
 
-	err := exec.Command(dockerBinary, "exec", "testing", "top").Start()
-	c.Assert(err, checker.IsNil)
+	result := icmd.StartCmd(icmd.Command(dockerBinary, "exec", "testing", "top"))
+	result.Assert(c, icmd.Success)
+	go icmd.WaitOnCmd(0, result)
 
 	type dstop struct {
-		out []byte
+		out string
 		err error
 	}
-
 	ch := make(chan dstop)
 	go func() {
-		out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput()
-		ch <- dstop{out, err}
+		result := icmd.RunCommand(dockerBinary, "stop", "testing")
+		ch <- dstop{result.Combined(), result.Error}
 		close(ch)
 	}()
 	select {
diff --git a/integration-cli/docker_cli_experimental_test.go b/integration-cli/docker_cli_experimental_test.go
deleted file mode 100644
index 0a496fd..0000000
--- a/integration-cli/docker_cli_experimental_test.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package main
-
-import (
-	"strings"
-
-	"github.com/docker/docker/integration-cli/checker"
-	"github.com/go-check/check"
-)
-
-func (s *DockerSuite) TestExperimentalVersionTrue(c *check.C) {
-	testExperimentalInVersion(c, ExperimentalDaemon, "*true")
-}
-
-func (s *DockerSuite) TestExperimentalVersionFalse(c *check.C) {
-	testExperimentalInVersion(c, NotExperimentalDaemon, "*false")
-}
-
-func testExperimentalInVersion(c *check.C, requirement func() bool, expectedValue string) {
-	testRequires(c, requirement)
-	out, _ := dockerCmd(c, "version")
-	for _, line := range strings.Split(out, "\n") {
-		if strings.HasPrefix(strings.TrimSpace(line), "Experimental:") {
-			c.Assert(line, checker.Matches, expectedValue)
-			return
-		}
-	}
-
-	c.Fatal(`"Experimental" not found in version output`)
-}
diff --git a/integration-cli/docker_cli_external_graphdriver_unix_test.go b/integration-cli/docker_cli_external_graphdriver_unix_test.go
index 16023c9..8e766bc 100644
--- a/integration-cli/docker_cli_external_graphdriver_unix_test.go
+++ b/integration-cli/docker_cli_external_graphdriver_unix_test.go
@@ -198,12 +198,13 @@
 			return
 		}
 
+		// TODO @gupta-ak: Figure out what to do here.
 		dir, err := driver.Get(req.ID, req.MountLabel)
 		if err != nil {
 			respond(w, err)
 			return
 		}
-		respond(w, &graphDriverResponse{Dir: dir})
+		respond(w, &graphDriverResponse{Dir: dir.Path()})
 	})
 
 	mux.HandleFunc("/GraphDriver.Put", func(w http.ResponseWriter, r *http.Request) {
diff --git a/integration-cli/docker_cli_external_volume_driver_unix_test.go b/integration-cli/docker_cli_external_volume_driver_unix_test.go
index 5fe417c..2e2de97 100644
--- a/integration-cli/docker_cli_external_volume_driver_unix_test.go
+++ b/integration-cli/docker_cli_external_volume_driver_unix_test.go
@@ -50,6 +50,7 @@
 }
 
 func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
 		Experimental: testEnv.ExperimentalDaemon(),
 	})
diff --git a/integration-cli/docker_cli_health_test.go b/integration-cli/docker_cli_health_test.go
index 0f78a41..632830c 100644
--- a/integration-cli/docker_cli_health_test.go
+++ b/integration-cli/docker_cli_health_test.go
@@ -39,6 +39,8 @@
 func (s *DockerSuite) TestHealth(c *check.C) {
 	testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows
 
+	existingContainers := ExistingContainerIDs(c)
+
 	imageName := "testhealth"
 	buildImageSuccessfully(c, imageName, build.WithDockerfile(`FROM busybox
 		RUN echo OK > /status
@@ -49,9 +51,10 @@
 
 	// No health status before starting
 	name := "test_health"
-	dockerCmd(c, "create", "--name", name, imageName)
-	out, _ := dockerCmd(c, "ps", "-a", "--format={{.Status}}")
-	c.Check(out, checker.Equals, "Created\n")
+	cid, _ := dockerCmd(c, "create", "--name", name, imageName)
+	out, _ := dockerCmd(c, "ps", "-a", "--format={{.ID}} {{.Status}}")
+	out = RemoveOutputForExistingElements(out, existingContainers)
+	c.Check(out, checker.Equals, cid[:12]+" Created\n")
 
 	// Inspect the options
 	out, _ = dockerCmd(c, "inspect",
@@ -140,7 +143,7 @@
 
 }
 
-// Github #33021
+// GitHub #33021
 func (s *DockerSuite) TestUnsetEnvVarHealthCheck(c *check.C) {
 	testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows
 
diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go
index d75974d..b6f8673 100644
--- a/integration-cli/docker_cli_info_test.go
+++ b/integration-cli/docker_cli_info_test.go
@@ -135,42 +135,48 @@
 func (s *DockerSuite) TestInfoDisplaysRunningContainers(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 
+	existing := existingContainerStates(c)
+
 	dockerCmd(c, "run", "-d", "busybox", "top")
 	out, _ := dockerCmd(c, "info")
-	c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1))
-	c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 1))
-	c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0))
-	c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0))
+	c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", existing["Containers"]+1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", existing["ContainersRunning"]+1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", existing["ContainersPaused"]))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", existing["ContainersStopped"]))
 }
 
 func (s *DockerSuite) TestInfoDisplaysPausedContainers(c *check.C) {
 	testRequires(c, IsPausable)
 
+	existing := existingContainerStates(c)
+
 	out := runSleepingContainer(c, "-d")
 	cleanedContainerID := strings.TrimSpace(out)
 
 	dockerCmd(c, "pause", cleanedContainerID)
 
 	out, _ = dockerCmd(c, "info")
-	c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1))
-	c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0))
-	c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 1))
-	c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0))
+	c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", existing["Containers"]+1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", existing["ContainersRunning"]))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", existing["ContainersPaused"]+1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", existing["ContainersStopped"]))
 }
 
 func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 
+	existing := existingContainerStates(c)
+
 	out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
 	cleanedContainerID := strings.TrimSpace(out)
 
 	dockerCmd(c, "stop", cleanedContainerID)
 
 	out, _ = dockerCmd(c, "info")
-	c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1))
-	c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0))
-	c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0))
-	c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 1))
+	c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", existing["Containers"]+1))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", existing["ContainersRunning"]))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", existing["ContainersPaused"]))
+	c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", existing["ContainersStopped"]+1))
 }
 
 func (s *DockerSuite) TestInfoDebug(c *check.C) {
@@ -237,3 +243,16 @@
 	c.Assert(err, checker.IsNil)
 	c.Assert(out, checker.Contains, "WARNING: labels with duplicate keys and conflicting values have been deprecated")
 }
+
+func existingContainerStates(c *check.C) map[string]int {
+	out, _ := dockerCmd(c, "info", "--format", "{{json .}}")
+	var m map[string]interface{}
+	err := json.Unmarshal([]byte(out), &m)
+	c.Assert(err, checker.IsNil)
+	res := map[string]int{}
+	res["Containers"] = int(m["Containers"].(float64))
+	res["ContainersRunning"] = int(m["ContainersRunning"].(float64))
+	res["ContainersPaused"] = int(m["ContainersPaused"].(float64))
+	res["ContainersStopped"] = int(m["ContainersStopped"].(float64))
+	return res
+}
diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go
index ea1c269..0a5aac5 100644
--- a/integration-cli/docker_cli_kill_test.go
+++ b/integration-cli/docker_cli_kill_test.go
@@ -1,13 +1,12 @@
 package main
 
 import (
-	"net/http"
 	"strings"
 	"time"
 
-	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/checker"
 	"github.com/docker/docker/integration-cli/cli"
+	"github.com/docker/docker/integration-cli/request"
 	"github.com/go-check/check"
 	"github.com/gotestyourself/gotestyourself/icmd"
 	"golang.org/x/net/context"
@@ -131,8 +130,7 @@
 	testRequires(c, DaemonIsLinux) // Windows only supports 1.25 or later
 	runSleepingContainer(c, "--name", "docker-kill-test-api", "-d")
 	dockerCmd(c, "stop", "docker-kill-test-api")
-	var httpClient *http.Client
-	cli, err := client.NewClient(daemonHost(), "v1.19", httpClient, nil)
+	cli, err := request.NewEnvClientWithVersion("v1.19")
 	c.Assert(err, check.IsNil)
 	defer cli.Close()
 	err = cli.ContainerKill(context.Background(), "docker-kill-test-api", "SIGKILL")
diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go
index 4f14634..fb99b28 100644
--- a/integration-cli/docker_cli_logs_test.go
+++ b/integration-cli/docker_cli_logs_test.go
@@ -10,7 +10,7 @@
 
 	"github.com/docker/docker/integration-cli/checker"
 	"github.com/docker/docker/integration-cli/cli"
-	"github.com/docker/docker/pkg/jsonlog"
+	"github.com/docker/docker/pkg/jsonmessage"
 	"github.com/go-check/check"
 	"github.com/gotestyourself/gotestyourself/icmd"
 )
@@ -55,7 +55,7 @@
 
 	for _, l := range lines {
 		if l != "" {
-			_, err := time.Parse(jsonlog.RFC3339NanoFixed+" ", ts.FindString(l))
+			_, err := time.Parse(jsonmessage.RFC3339NanoFixed+" ", ts.FindString(l))
 			c.Assert(err, checker.IsNil, check.Commentf("Failed to parse timestamp from %v", l))
 			// ensure we have padded 0's
 			c.Assert(l[29], checker.Equals, uint8('Z'))
@@ -214,14 +214,15 @@
 func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) {
 	// TODO Windows: Fix this test for TP5.
 	testRequires(c, DaemonIsLinux)
-	out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 600000;yes X | head -c 200000`)
+	expected := 150000
+	out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", fmt.Sprintf("usleep 600000; yes X | head -c %d", expected))
 
 	id := strings.TrimSpace(out)
 
 	stopSlowRead := make(chan bool)
 
 	go func() {
-		exec.Command(dockerBinary, "wait", id).Run()
+		dockerCmd(c, "wait", id)
 		stopSlowRead <- true
 	}()
 
@@ -229,6 +230,7 @@
 	stdout, err := logCmd.StdoutPipe()
 	c.Assert(err, checker.IsNil)
 	c.Assert(logCmd.Start(), checker.IsNil)
+	defer func() { go logCmd.Wait() }()
 
 	// First read slowly
 	bytes1, err := ConsumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead)
@@ -238,8 +240,9 @@
 	bytes2, err := ConsumeWithSpeed(stdout, 32*1024, 0, nil)
 	c.Assert(err, checker.IsNil)
 
+	c.Assert(logCmd.Wait(), checker.IsNil)
+
 	actual := bytes1 + bytes2
-	expected := 200000
 	c.Assert(actual, checker.Equals, expected)
 }
 
@@ -277,6 +280,7 @@
 	r, w := io.Pipe()
 	cmd.Stdout = w
 	c.Assert(cmd.Start(), checker.IsNil)
+	go cmd.Wait()
 
 	// Make sure pipe is written to
 	chErr := make(chan error)
@@ -288,6 +292,7 @@
 	c.Assert(<-chErr, checker.IsNil)
 	c.Assert(cmd.Process.Kill(), checker.IsNil)
 	r.Close()
+	cmd.Wait()
 	// NGoroutines is not updated right away, so we need to wait before failing
 	c.Assert(waitForGoroutines(nroutines), checker.IsNil)
 }
@@ -301,8 +306,10 @@
 	c.Assert(err, checker.IsNil)
 	cmd := exec.Command(dockerBinary, "logs", "-f", id)
 	c.Assert(cmd.Start(), checker.IsNil)
+	go cmd.Wait()
 	time.Sleep(200 * time.Millisecond)
 	c.Assert(cmd.Process.Kill(), checker.IsNil)
+	cmd.Wait()
 
 	// NGoroutines is not updated right away, so we need to wait before failing
 	c.Assert(waitForGoroutines(nroutines), checker.IsNil)
diff --git a/integration-cli/docker_cli_network_unix_test.go b/integration-cli/docker_cli_network_unix_test.go
index 2fc0724..4bb5423 100644
--- a/integration-cli/docker_cli_network_unix_test.go
+++ b/integration-cli/docker_cli_network_unix_test.go
@@ -10,7 +10,6 @@
 	"net/http"
 	"net/http/httptest"
 	"os"
-	"path/filepath"
 	"strings"
 	"time"
 
@@ -288,39 +287,6 @@
 	}
 }
 
-func (s *DockerSuite) TestNetworkLsFormat(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-	out, _ := dockerCmd(c, "network", "ls", "--format", "{{.Name}}")
-	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
-
-	expected := []string{"bridge", "host", "none"}
-	var names []string
-	names = append(names, lines...)
-	c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names))
-}
-
-func (s *DockerSuite) TestNetworkLsFormatDefaultFormat(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-
-	config := `{
-		"networksFormat": "{{ .Name }} default"
-}`
-	d, err := ioutil.TempDir("", "integration-cli-")
-	c.Assert(err, checker.IsNil)
-	defer os.RemoveAll(d)
-
-	err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644)
-	c.Assert(err, checker.IsNil)
-
-	out, _ := dockerCmd(c, "--config", d, "network", "ls")
-	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
-
-	expected := []string{"bridge default", "host default", "none default"}
-	var names []string
-	names = append(names, lines...)
-	c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names))
-}
-
 func (s *DockerNetworkSuite) TestDockerNetworkCreatePredefined(c *check.C) {
 	predefined := []string{"bridge", "host", "none", "default"}
 	for _, net := range predefined {
@@ -351,6 +317,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkLsFilter(c *check.C) {
+	testRequires(c, OnlyDefaultNetworks)
 	testNet := "testnet1"
 	testLabel := "foo"
 	testValue := "bar"
@@ -624,6 +591,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkIPAMMultipleNetworks(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	// test0 bridge network
 	dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test1")
 	assertNwIsAvailable(c, "test1")
@@ -664,6 +632,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkCustomIPAM(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	// Create a bridge network using custom ipam driver
 	dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "br0")
 	assertNwIsAvailable(c, "br0")
@@ -679,6 +648,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkIPAMOptions(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	// Create a bridge network using custom ipam driver and options
 	dockerCmd(c, "network", "create", "--ipam-driver", dummyIPAMDriver, "--ipam-opt", "opt1=drv1", "--ipam-opt", "opt2=drv2", "br0")
 	assertNwIsAvailable(c, "br0")
@@ -691,6 +661,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkNullIPAMDriver(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	// Create a network with null ipam driver
 	_, _, err := dockerCmdWithError("network", "create", "-d", dummyNetworkDriver, "--ipam-driver", "null", "test000")
 	c.Assert(err, check.IsNil)
@@ -796,6 +767,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "-o", "opt1=drv1", "-o", "opt2=drv2", "testopt")
 	assertNwIsAvailable(c, "testopt")
 	gopts := remoteDriverNetworkRequest.Options[netlabel.GenericData]
@@ -981,6 +953,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	// Verify exposed ports are present in ps output when running a container on
 	// a network managed by a driver which does not provide the default gateway
 	// for the container
@@ -1007,7 +980,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C) {
-	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon)
 	dnd := "dnd"
 	did := "did"
 
@@ -1048,6 +1021,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkMacInspect(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	// Verify endpoint MAC address is correctly populated in container's network settings
 	nwn := "ov"
 	ctn := "bb"
@@ -1113,6 +1087,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRestart(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	cName := "bb"
 	nwList := []string{"nw1", "nw2", "nw3"}
 
@@ -1131,6 +1106,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRestart(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	cName := "cc"
 	nwList := []string{"nw1", "nw2", "nw3"}
 
@@ -1157,7 +1133,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) {
-	testRequires(c, DaemonIsLinux, NotUserNamespace)
+	testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon)
 	s.d.StartWithBusybox(c)
 
 	// Run a few containers on host network
@@ -1283,6 +1259,7 @@
 }
 
 func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContainer(c *check.C) {
+	testRequires(c, SameHostDaemon)
 	dockerCmd(c, "network", "create", "test")
 	dockerCmd(c, "create", "--name=foo", "busybox", "top")
 	dockerCmd(c, "network", "connect", "test", "foo")
@@ -1648,6 +1625,7 @@
 func (s *DockerSuite) TestDockerNetworkConnectFailsNoInspectChange(c *check.C) {
 	dockerCmd(c, "run", "-d", "--name=bb", "busybox", "top")
 	c.Assert(waitRun("bb"), check.IsNil)
+	defer dockerCmd(c, "stop", "bb")
 
 	ns0 := inspectField(c, "bb", "NetworkSettings.Networks.bridge")
 
@@ -1810,7 +1788,7 @@
 // TestConntrackFlowsLeak covers the failure scenario of ticket: https://github.com/docker/docker/issues/8795
 // Validates that conntrack is correctly cleaned once a container is destroyed
 func (s *DockerNetworkSuite) TestConntrackFlowsLeak(c *check.C) {
-	testRequires(c, IsAmd64, DaemonIsLinux, Network)
+	testRequires(c, IsAmd64, DaemonIsLinux, Network, SameHostDaemon)
 
 	// Create a new network
 	cli.DockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind")
diff --git a/integration-cli/docker_cli_port_test.go b/integration-cli/docker_cli_port_test.go
index bcb87f5..84058cd 100644
--- a/integration-cli/docker_cli_port_test.go
+++ b/integration-cli/docker_cli_port_test.go
@@ -5,6 +5,7 @@
 	"net"
 	"regexp"
 	"sort"
+	"strconv"
 	"strings"
 
 	"github.com/docker/docker/integration-cli/checker"
@@ -148,9 +149,8 @@
 
 	out, _ = dockerCmd(c, "port", ID)
 
-	err = assertPortList(c, out, []string{
-		"80/tcp -> 0.0.0.0:8000",
-		"80/udp -> 0.0.0.0:8000"})
+	// Running this test multiple times causes the TCP port to increment.
+	err = assertPortRange(c, out, []int{8000, 8080}, []int{8000, 8080})
 	// Port list is not correct
 	c.Assert(err, checker.IsNil)
 	dockerCmd(c, "rm", "-f", ID)
@@ -173,6 +173,38 @@
 	return nil
 }
 
+func assertPortRange(c *check.C, out string, expectedTcp, expectedUdp []int) error {
+	lines := strings.Split(strings.Trim(out, "\n "), "\n")
+
+	var validTcp, validUdp bool
+	for _, l := range lines {
+		// 80/tcp -> 0.0.0.0:8015
+		port, err := strconv.Atoi(strings.Split(l, ":")[1])
+		if err != nil {
+			return err
+		}
+		if strings.Contains(l, "tcp") && expectedTcp != nil {
+			if port < expectedTcp[0] || port > expectedTcp[1] {
+				return fmt.Errorf("tcp port (%d) not in expected range %d-%d", port, expectedTcp[0], expectedTcp[1])
+			}
+			validTcp = true
+		}
+		if strings.Contains(l, "udp") && expectedUdp != nil {
+			if port < expectedUdp[0] || port > expectedUdp[1] {
+				return fmt.Errorf("udp port (%d) not in expected range %d-%d", port, expectedUdp[0], expectedUdp[1])
+			}
+			validUdp = true
+		}
+	}
+	if !validTcp {
+		return fmt.Errorf("tcp port not found")
+	}
+	if !validUdp {
+		return fmt.Errorf("udp port not found")
+	}
+	return nil
+}
+
 func stopRemoveContainer(id string, c *check.C) {
 	dockerCmd(c, "rm", "-f", id)
 }
diff --git a/integration-cli/docker_cli_prune_unix_test.go b/integration-cli/docker_cli_prune_unix_test.go
index bea4f4f..763a72d 100644
--- a/integration-cli/docker_cli_prune_unix_test.go
+++ b/integration-cli/docker_cli_prune_unix_test.go
@@ -19,13 +19,21 @@
 func pruneNetworkAndVerify(c *check.C, d *daemon.Swarm, kept, pruned []string) {
 	_, err := d.Cmd("network", "prune", "--force")
 	c.Assert(err, checker.IsNil)
-	out, err := d.Cmd("network", "ls", "--format", "{{.Name}}")
-	c.Assert(err, checker.IsNil)
+
 	for _, s := range kept {
-		c.Assert(out, checker.Contains, s)
+		waitAndAssert(c, defaultReconciliationTimeout, func(*check.C) (interface{}, check.CommentInterface) {
+			out, err := d.Cmd("network", "ls", "--format", "{{.Name}}")
+			c.Assert(err, checker.IsNil)
+			return out, nil
+		}, checker.Contains, s)
 	}
+
 	for _, s := range pruned {
-		c.Assert(out, checker.Not(checker.Contains), s)
+		waitAndAssert(c, defaultReconciliationTimeout, func(*check.C) (interface{}, check.CommentInterface) {
+			out, err := d.Cmd("network", "ls", "--format", "{{.Name}}")
+			c.Assert(err, checker.IsNil)
+			return out, nil
+		}, checker.Not(checker.Contains), s)
 	}
 }
 
@@ -46,7 +54,7 @@
 
 	serviceName := "testprunesvc"
 	replicas := 1
-	out, err := d.Cmd("service", "create", "--no-resolve-image",
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image",
 		"--name", serviceName,
 		"--replicas", strconv.Itoa(replicas),
 		"--network", "n3",
@@ -64,6 +72,7 @@
 	_, err = d.Cmd("service", "rm", serviceName)
 	c.Assert(err, checker.IsNil)
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 0)
+
 	pruneNetworkAndVerify(c, d, []string{}, []string{"n1", "n3"})
 }
 
diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go
index 736103e..bea1261 100644
--- a/integration-cli/docker_cli_ps_test.go
+++ b/integration-cli/docker_cli_ps_test.go
@@ -19,6 +19,8 @@
 )
 
 func (s *DockerSuite) TestPsListContainersBase(c *check.C) {
+	existingContainers := ExistingContainerIDs(c)
+
 	out := runSleepingContainer(c, "-d")
 	firstID := strings.TrimSpace(out)
 
@@ -43,79 +45,79 @@
 
 	// all
 	out, _ = dockerCmd(c, "ps", "-a")
-	c.Assert(assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out))
 
 	// running
 	out, _ = dockerCmd(c, "ps")
-	c.Assert(assertContainerList(out, []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out))
 
 	// limit
 	out, _ = dockerCmd(c, "ps", "-n=2", "-a")
 	expected := []string{fourthID, thirdID}
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out))
 
 	out, _ = dockerCmd(c, "ps", "-n=2")
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out))
 
 	// filter since
 	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-a")
 	expected = []string{fourthID, thirdID, secondID}
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out))
 
 	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID)
 	expected = []string{fourthID, secondID}
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out))
 
 	out, _ = dockerCmd(c, "ps", "-f", "since="+thirdID)
 	expected = []string{fourthID}
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out))
 
 	// filter before
 	out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-a")
 	expected = []string{thirdID, secondID, firstID}
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out))
 
 	out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID)
 	expected = []string{secondID, firstID}
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out))
 
 	out, _ = dockerCmd(c, "ps", "-f", "before="+thirdID)
 	expected = []string{secondID, firstID}
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out))
 
 	// filter since & before
 	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-a")
 	expected = []string{thirdID, secondID}
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out))
 
 	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID)
 	expected = []string{secondID}
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out))
 
 	// filter since & limit
 	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2", "-a")
 	expected = []string{fourthID, thirdID}
 
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out))
 
 	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2")
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out))
 
 	// filter before & limit
 	out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1", "-a")
 	expected = []string{thirdID}
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out))
 
 	out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1")
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out))
 
 	// filter since & filter before & limit
 	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1", "-a")
 	expected = []string{thirdID}
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out))
 
 	out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1")
-	c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out))
+	c.Assert(assertContainerList(RemoveOutputForExistingElements(out, existingContainers), expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out))
 
 }
 
@@ -185,6 +187,8 @@
 }
 
 func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) {
+	existingContainers := ExistingContainerIDs(c)
+
 	// start exited container
 	out := cli.DockerCmd(c, "run", "-d", "busybox").Combined()
 	firstID := strings.TrimSpace(out)
@@ -199,11 +203,11 @@
 	// filter containers by exited
 	out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=exited").Combined()
 	containerOut := strings.TrimSpace(out)
-	c.Assert(containerOut, checker.Equals, firstID)
+	c.Assert(RemoveOutputForExistingElements(containerOut, existingContainers), checker.Equals, firstID)
 
 	out = cli.DockerCmd(c, "ps", "-a", "--no-trunc", "-q", "--filter=status=running").Combined()
 	containerOut = strings.TrimSpace(out)
-	c.Assert(containerOut, checker.Equals, secondID)
+	c.Assert(RemoveOutputForExistingElements(containerOut, existingContainers), checker.Equals, secondID)
 
 	result := cli.Docker(cli.Args("ps", "-a", "-q", "--filter=status=rubbish"), cli.WithTimeout(time.Second*60))
 	result.Assert(c, icmd.Expected{
@@ -222,11 +226,12 @@
 
 		out = cli.DockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=paused").Combined()
 		containerOut = strings.TrimSpace(out)
-		c.Assert(containerOut, checker.Equals, pausedID)
+		c.Assert(RemoveOutputForExistingElements(containerOut, existingContainers), checker.Equals, pausedID)
 	}
 }
 
 func (s *DockerSuite) TestPsListContainersFilterHealth(c *check.C) {
+	existingContainers := ExistingContainerIDs(c)
 	// Test legacy no health check
 	out := runSleepingContainer(c, "--name=none_legacy")
 	containerID := strings.TrimSpace(out)
@@ -264,7 +269,7 @@
 	waitForHealthStatus(c, "passing_container", "starting", "healthy")
 
 	out = cli.DockerCmd(c, "ps", "-q", "--no-trunc", "--filter=health=healthy").Combined()
-	containerOut = strings.TrimSpace(out)
+	containerOut = strings.TrimSpace(RemoveOutputForExistingElements(out, existingContainers))
 	c.Assert(containerOut, checker.Equals, containerID, check.Commentf("Expected containerID %s, got %s for healthy filter, output: %q", containerID, containerOut, out))
 }
 
@@ -305,6 +310,8 @@
 // - Run containers for each of those image (busybox, images_ps_filter_test1, images_ps_filter_test2)
 // - Filter them out :P
 func (s *DockerSuite) TestPsListContainersFilterAncestorImage(c *check.C) {
+	existingContainers := ExistingContainerIDs(c)
+
 	// Build images
 	imageName1 := "images_ps_filter_test1"
 	buildImageSuccessfully(c, imageName1, build.WithDockerfile(`FROM busybox
@@ -367,12 +374,12 @@
 	var out string
 	for _, filter := range filterTestSuite {
 		out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+filter.filterName)
-		checkPsAncestorFilterOutput(c, out, filter.filterName, filter.expectedIDs)
+		checkPsAncestorFilterOutput(c, RemoveOutputForExistingElements(out, existingContainers), filter.filterName, filter.expectedIDs)
 	}
 
 	// Multiple ancestor filter
 	out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageName2, "--filter=ancestor="+imageName1Tagged)
-	checkPsAncestorFilterOutput(c, out, imageName2+","+imageName1Tagged, []string{fourthID, fifthID})
+	checkPsAncestorFilterOutput(c, RemoveOutputForExistingElements(out, existingContainers), imageName2+","+imageName1Tagged, []string{fourthID, fifthID})
 }
 
 func checkPsAncestorFilterOutput(c *check.C, out string, filterName string, expectedIDs []string) {
@@ -469,6 +476,9 @@
 func (s *DockerSuite) TestPsRightTagName(c *check.C) {
 	// TODO Investigate further why this fails on Windows to Windows CI
 	testRequires(c, DaemonIsLinux)
+
+	existingContainers := ExistingContainerNames(c)
+
 	tag := "asybox:shmatest"
 	dockerCmd(c, "tag", "busybox", tag)
 
@@ -490,6 +500,7 @@
 
 	out, _ = dockerCmd(c, "ps", "--no-trunc")
 	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+	lines = RemoveLinesForExistingElements(lines, existingContainers)
 	// skip header
 	lines = lines[1:]
 	c.Assert(lines, checker.HasLen, 3, check.Commentf("There should be 3 running container, got %d", len(lines)))
@@ -511,6 +522,7 @@
 func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) {
 	// Problematic on Windows as it doesn't support links as of Jan 2016
 	testRequires(c, DaemonIsLinux)
+	existingContainers := ExistingContainerIDs(c)
 	runSleepingContainer(c, "--name=first")
 	runSleepingContainer(c, "--name=second", "--link=first:first")
 
@@ -518,6 +530,7 @@
 	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
 	// strip header
 	lines = lines[1:]
+	lines = RemoveLinesForExistingElements(lines, existingContainers)
 	expected := []string{"second", "first,second/first"}
 	var names []string
 	for _, l := range lines {
@@ -581,12 +594,14 @@
 func (s *DockerSuite) TestPsFormatMultiNames(c *check.C) {
 	// Problematic on Windows as it doesn't support link as of Jan 2016
 	testRequires(c, DaemonIsLinux)
+	existingContainers := ExistingContainerNames(c)
 	//create 2 containers and link them
 	dockerCmd(c, "run", "--name=child", "-d", "busybox", "top")
 	dockerCmd(c, "run", "--name=parent", "--link=child:linkedone", "-d", "busybox", "top")
 
 	//use the new format capabilities to only list the names and --no-trunc to get all names
 	out, _ := dockerCmd(c, "ps", "--format", "{{.Names}}", "--no-trunc")
+	out = RemoveOutputForExistingElements(out, existingContainers)
 	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
 	expected := []string{"parent", "child,parent/linkedone"}
 	var names []string
@@ -595,6 +610,7 @@
 
 	//now list without turning off truncation and make sure we only get the non-link names
 	out, _ = dockerCmd(c, "ps", "--format", "{{.Names}}")
+	out = RemoveOutputForExistingElements(out, existingContainers)
 	lines = strings.Split(strings.TrimSpace(string(out)), "\n")
 	expected = []string{"parent", "child"}
 	var truncNames []string
@@ -604,30 +620,22 @@
 
 // Test for GitHub issue #21772
 func (s *DockerSuite) TestPsNamesMultipleTime(c *check.C) {
+	existingContainers := ExistingContainerNames(c)
 	runSleepingContainer(c, "--name=test1")
 	runSleepingContainer(c, "--name=test2")
 
 	//use the new format capabilities to list the names twice
 	out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Names}}")
 	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+	lines = RemoveLinesForExistingElements(lines, existingContainers)
 	expected := []string{"test2 test2", "test1 test1"}
 	var names []string
 	names = append(names, lines...)
 	c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with names displayed twice: %v, got: %v", expected, names))
 }
 
-func (s *DockerSuite) TestPsFormatHeaders(c *check.C) {
-	// make sure no-container "docker ps" still prints the header row
-	out, _ := dockerCmd(c, "ps", "--format", "table {{.ID}}")
-	c.Assert(out, checker.Equals, "CONTAINER ID\n", check.Commentf(`Expected 'CONTAINER ID\n', got %v`, out))
-
-	// verify that "docker ps" with a container still prints the header row also
-	runSleepingContainer(c, "--name=test")
-	out, _ = dockerCmd(c, "ps", "--format", "table {{.Names}}")
-	c.Assert(out, checker.Equals, "NAMES\ntest\n", check.Commentf(`Expected 'NAMES\ntest\n', got %v`, out))
-}
-
 func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) {
+	existingContainers := ExistingContainerIDs(c)
 	config := `{
 		"psFormat": "default {{ .ID }}"
 }`
@@ -642,6 +650,7 @@
 	id := strings.TrimSpace(out)
 
 	out, _ = dockerCmd(c, "--config", d, "ps", "-q")
+	out = RemoveOutputForExistingElements(out, existingContainers)
 	c.Assert(id, checker.HasPrefix, strings.TrimSpace(out), check.Commentf("Expected to print only the container id, got %v\n", out))
 }
 
@@ -652,6 +661,8 @@
 	originalImageName := "busybox:TestPsImageIDAfterUpdate-original"
 	updatedImageName := "busybox:TestPsImageIDAfterUpdate-updated"
 
+	existingContainers := ExistingContainerIDs(c)
+
 	icmd.RunCommand(dockerBinary, "tag", "busybox:latest", originalImageName).Assert(c, icmd.Success)
 
 	originalImageID := getIDByName(c, originalImageName)
@@ -664,6 +675,7 @@
 	result.Assert(c, icmd.Success)
 
 	lines := strings.Split(strings.TrimSpace(string(result.Combined())), "\n")
+	lines = RemoveLinesForExistingElements(lines, existingContainers)
 	// skip header
 	lines = lines[1:]
 	c.Assert(len(lines), checker.Equals, 1)
@@ -680,6 +692,7 @@
 	result.Assert(c, icmd.Success)
 
 	lines = strings.Split(strings.TrimSpace(string(result.Combined())), "\n")
+	lines = RemoveLinesForExistingElements(lines, existingContainers)
 	// skip header
 	lines = lines[1:]
 	c.Assert(len(lines), checker.Equals, 1)
@@ -710,6 +723,8 @@
 }
 
 func (s *DockerSuite) TestPsShowMounts(c *check.C) {
+	existingContainers := ExistingContainerNames(c)
+
 	prefix, slash := getPrefixAndSlashFromDaemonPlatform()
 
 	mp := prefix + slash + "test"
@@ -736,6 +751,7 @@
 	out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}")
 
 	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+	lines = RemoveLinesForExistingElements(lines, existingContainers)
 	c.Assert(lines, checker.HasLen, 3)
 
 	fields := strings.Fields(lines[0])
@@ -755,6 +771,7 @@
 	out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=ps-volume-test")
 
 	lines = strings.Split(strings.TrimSpace(string(out)), "\n")
+	lines = RemoveLinesForExistingElements(lines, existingContainers)
 	c.Assert(lines, checker.HasLen, 1)
 
 	fields = strings.Fields(lines[0])
@@ -768,6 +785,7 @@
 	out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+mp)
 
 	lines = strings.Split(strings.TrimSpace(string(out)), "\n")
+	lines = RemoveLinesForExistingElements(lines, existingContainers)
 	c.Assert(lines, checker.HasLen, 2)
 
 	fields = strings.Fields(lines[0])
@@ -779,6 +797,7 @@
 	out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountSource)
 
 	lines = strings.Split(strings.TrimSpace(string(out)), "\n")
+	lines = RemoveLinesForExistingElements(lines, existingContainers)
 	c.Assert(lines, checker.HasLen, 1)
 
 	fields = strings.Fields(lines[0])
@@ -790,6 +809,7 @@
 	out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountDestination)
 
 	lines = strings.Split(strings.TrimSpace(string(out)), "\n")
+	lines = RemoveLinesForExistingElements(lines, existingContainers)
 	c.Assert(lines, checker.HasLen, 1)
 
 	fields = strings.Fields(lines[0])
@@ -820,6 +840,8 @@
 }
 
 func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) {
+	existing := ExistingContainerIDs(c)
+
 	// TODO default network on Windows is not called "bridge", and creating a
 	// custom network fails on Windows fails with "Error response from daemon: plugin not found")
 	testRequires(c, DaemonIsLinux)
@@ -837,7 +859,7 @@
 	lines = lines[1:]
 
 	// ps output should have no containers
-	c.Assert(lines, checker.HasLen, 0)
+	c.Assert(RemoveLinesForExistingElements(lines, existing), checker.HasLen, 0)
 
 	// Filter docker ps on network bridge
 	out, _ = dockerCmd(c, "ps", "--filter", "network=bridge")
@@ -849,7 +871,7 @@
 	lines = lines[1:]
 
 	// ps output should have only one container
-	c.Assert(lines, checker.HasLen, 1)
+	c.Assert(RemoveLinesForExistingElements(lines, existing), checker.HasLen, 1)
 
 	// Making sure onbridgenetwork is on the output
 	c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n"))
@@ -864,7 +886,7 @@
 	lines = lines[1:]
 
 	//ps output should have both the containers
-	c.Assert(lines, checker.HasLen, 2)
+	c.Assert(RemoveLinesForExistingElements(lines, existing), checker.HasLen, 2)
 
 	// Making sure onbridgenetwork and onnonenetwork is on the output
 	c.Assert(containerOut, checker.Contains, "onnonenetwork", check.Commentf("Missing the container on none network\n"))
@@ -885,11 +907,12 @@
 	containerOut = strings.TrimSpace(string(out))
 
 	lines = strings.Split(containerOut, "\n")
+
 	// skip header
 	lines = lines[1:]
 
 	// ps output should have only one container
-	c.Assert(lines, checker.HasLen, 1)
+	c.Assert(RemoveLinesForExistingElements(lines, existing), checker.HasLen, 1)
 
 	// Making sure onbridgenetwork is on the output
 	c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n"))
@@ -927,13 +950,16 @@
 
 // Test case for 30291
 func (s *DockerSuite) TestPsFormatTemplateWithArg(c *check.C) {
+	existingContainers := ExistingContainerNames(c)
 	runSleepingContainer(c, "-d", "--name", "top", "--label", "some.label=label.foo-bar")
 	out, _ := dockerCmd(c, "ps", "--format", `{{.Names}} {{.Label "some.label"}}`)
+	out = RemoveOutputForExistingElements(out, existingContainers)
 	c.Assert(strings.TrimSpace(out), checker.Equals, "top label.foo-bar")
 }
 
 func (s *DockerSuite) TestPsListContainersFilterPorts(c *check.C) {
 	testRequires(c, DaemonIsLinux)
+	existingContainers := ExistingContainerIDs(c)
 
 	out, _ := dockerCmd(c, "run", "-d", "--publish=80", "busybox", "top")
 	id1 := strings.TrimSpace(out)
@@ -962,6 +988,7 @@
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id2)
 
 	out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter", "expose=8080/tcp")
+	out = RemoveOutputForExistingElements(out, existingContainers)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), id1)
 	c.Assert(strings.TrimSpace(out), checker.Equals, id2)
 }
@@ -969,11 +996,14 @@
 func (s *DockerSuite) TestPsNotShowLinknamesOfDeletedContainer(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 
+	existingContainers := ExistingContainerNames(c)
+
 	dockerCmd(c, "create", "--name=aaa", "busybox", "top")
 	dockerCmd(c, "create", "--name=bbb", "--link=aaa", "busybox", "top")
 
 	out, _ := dockerCmd(c, "ps", "--no-trunc", "-a", "--format", "{{.Names}}")
 	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+	lines = RemoveLinesForExistingElements(lines, existingContainers)
 	expected := []string{"bbb", "aaa,bbb/aaa"}
 	var names []string
 	names = append(names, lines...)
@@ -982,5 +1012,6 @@
 	dockerCmd(c, "rm", "bbb")
 
 	out, _ = dockerCmd(c, "ps", "--no-trunc", "-a", "--format", "{{.Names}}")
+	out = RemoveOutputForExistingElements(out, existingContainers)
 	c.Assert(strings.TrimSpace(out), checker.Equals, "aaa")
 }
diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go
index fd91edb..613cdb3 100644
--- a/integration-cli/docker_cli_pull_test.go
+++ b/integration-cli/docker_cli_pull_test.go
@@ -193,25 +193,26 @@
 // results in more images than a naked pull.
 func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) {
 	testRequires(c, DaemonIsLinux)
-	s.Cmd(c, "pull", "busybox")
-	outImageCmd := s.Cmd(c, "images", "busybox")
+	s.Cmd(c, "pull", "dockercore/engine-pull-all-test-fixture")
+	outImageCmd := s.Cmd(c, "images", "dockercore/engine-pull-all-test-fixture")
 	splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n")
 	c.Assert(splitOutImageCmd, checker.HasLen, 2)
 
-	s.Cmd(c, "pull", "--all-tags=true", "busybox")
-	outImageAllTagCmd := s.Cmd(c, "images", "busybox")
+	s.Cmd(c, "pull", "--all-tags=true", "dockercore/engine-pull-all-test-fixture")
+	outImageAllTagCmd := s.Cmd(c, "images", "dockercore/engine-pull-all-test-fixture")
 	linesCount := strings.Count(outImageAllTagCmd, "\n")
 	c.Assert(linesCount, checker.GreaterThan, 2, check.Commentf("pulling all tags should provide more than two images, got %s", outImageAllTagCmd))
 
-	// Verify that the line for 'busybox:latest' is left unchanged.
+	// Verify that the line for 'dockercore/engine-pull-all-test-fixture:latest' is left unchanged.
 	var latestLine string
 	for _, line := range strings.Split(outImageAllTagCmd, "\n") {
-		if strings.HasPrefix(line, "busybox") && strings.Contains(line, "latest") {
+		if strings.HasPrefix(line, "dockercore/engine-pull-all-test-fixture") && strings.Contains(line, "latest") {
 			latestLine = line
 			break
 		}
 	}
-	c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for busybox:latest found after pulling all tags"))
+	c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for dockercore/engine-pull-all-test-fixture:latest found after pulling all tags"))
+
 	splitLatest := strings.Fields(latestLine)
 	splitCurrent := strings.Fields(splitOutImageCmd[1])
 
@@ -227,7 +228,7 @@
 	splitCurrent[4] = ""
 	splitCurrent[5] = ""
 
-	c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("busybox:latest was changed after pulling all tags"))
+	c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("dockercore/engine-pull-all-test-fixture:latest was changed after pulling all tags"))
 }
 
 // TestPullClientDisconnect kills the client during a pull operation and verifies that the operation
@@ -243,6 +244,7 @@
 	c.Assert(err, checker.IsNil)
 	err = pullCmd.Start()
 	c.Assert(err, checker.IsNil)
+	go pullCmd.Wait()
 
 	// Cancel as soon as we get some output.
 	buf := make([]byte, 10)
@@ -273,7 +275,7 @@
 func (s *DockerSuite) TestPullLinuxImageFailsOnWindows(c *check.C) {
 	testRequires(c, DaemonIsWindows, Network)
 	_, _, err := dockerCmdWithError("pull", "ubuntu")
-	c.Assert(err.Error(), checker.Contains, "cannot be used on this platform")
+	c.Assert(err.Error(), checker.Contains, "no matching manifest")
 }
 
 // Regression test for https://github.com/docker/docker/issues/28892
diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go
index 340ad4b..6dbbb67 100644
--- a/integration-cli/docker_cli_run_test.go
+++ b/integration-cli/docker_cli_run_test.go
@@ -21,13 +21,15 @@
 	"sync"
 	"time"
 
+	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/checker"
 	"github.com/docker/docker/integration-cli/cli"
 	"github.com/docker/docker/integration-cli/cli/build"
 	"github.com/docker/docker/integration-cli/cli/build/fakecontext"
+	"github.com/docker/docker/internal/testutil"
 	"github.com/docker/docker/pkg/mount"
+	"github.com/docker/docker/pkg/parsers/kernel"
 	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/docker/pkg/stringutils"
 	"github.com/docker/docker/runconfig"
 	"github.com/docker/go-connections/nat"
 	"github.com/docker/libnetwork/resolvconf"
@@ -35,6 +37,7 @@
 	"github.com/go-check/check"
 	"github.com/gotestyourself/gotestyourself/icmd"
 	libcontainerUser "github.com/opencontainers/runc/libcontainer/user"
+	"golang.org/x/net/context"
 )
 
 // "test123" should be printed by docker run
@@ -824,7 +827,7 @@
 	})
 	result.Assert(c, icmd.Success)
 
-	actualEnv := strings.Split(strings.TrimSpace(result.Combined()), "\n")
+	actualEnv := strings.Split(strings.TrimSuffix(result.Stdout(), "\n"), "\n")
 	sort.Strings(actualEnv)
 
 	goodEnv := []string{
@@ -1825,7 +1828,7 @@
 }
 
 func eqToBaseDiff(out string, c *check.C) bool {
-	name := "eqToBaseDiff" + stringutils.GenerateRandomAlphaOnlyString(32)
+	name := "eqToBaseDiff" + testutil.GenerateRandomAlphaOnlyString(32)
 	dockerCmd(c, "run", "--name", name, "busybox", "echo", "hello")
 	cID := getIDByName(c, name)
 	baseDiff, _ := dockerCmd(c, "diff", cID)
@@ -2246,6 +2249,7 @@
 	if err := cont.Start(); err != nil {
 		c.Fatal(err)
 	}
+	defer func() { go cont.Wait() }()
 	n, err := ConsumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil)
 	if err != nil {
 		c.Fatal(err)
@@ -2725,7 +2729,7 @@
 	if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
 		testPriv = false
 	}
-	testReadOnlyFile(c, testPriv, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me")
+	testReadOnlyFile(c, testPriv, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel")
 }
 
 func (s *DockerSuite) TestPermissionsPtsReadonlyRootfs(c *check.C) {
@@ -2813,23 +2817,27 @@
 
 // run container with --rm should remove container if exit code != 0
 func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) {
+	existingContainers := ExistingContainerIDs(c)
 	name := "flowers"
 	cli.Docker(cli.Args("run", "--name", name, "--rm", "busybox", "ls", "/notexists")).Assert(c, icmd.Expected{
 		ExitCode: 1,
 	})
 
 	out := cli.DockerCmd(c, "ps", "-q", "-a").Combined()
+	out = RemoveOutputForExistingElements(out, existingContainers)
 	if out != "" {
 		c.Fatal("Expected not to have containers", out)
 	}
 }
 
 func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) {
+	existingContainers := ExistingContainerIDs(c)
 	name := "sparkles"
 	cli.Docker(cli.Args("run", "--name", name, "--rm", "busybox", "commandNotFound")).Assert(c, icmd.Expected{
 		ExitCode: 127,
 	})
 	out := cli.DockerCmd(c, "ps", "-q", "-a").Combined()
+	out = RemoveOutputForExistingElements(out, existingContainers)
 	if out != "" {
 		c.Fatal("Expected not to have containers", out)
 	}
@@ -3107,6 +3115,11 @@
 	filename := createTmpFile(c, expected)
 	defer os.Remove(filename)
 
+	// for user namespaced test runs, the temp file must be accessible to unprivileged root
+	if err := os.Chmod(filename, 0646); err != nil {
+		c.Fatalf("error modifying permissions of %s: %v", filename, err)
+	}
+
 	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}
 
 	for i := range nwfiles {
@@ -3124,6 +3137,11 @@
 	filename := createTmpFile(c, "test123")
 	defer os.Remove(filename)
 
+	// for user namespaced test runs, the temp file must be accessible to unprivileged root
+	if err := os.Chmod(filename, 0646); err != nil {
+		c.Fatalf("error modifying permissions of %s: %v", filename, err)
+	}
+
 	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}
 
 	for i := range nwfiles {
@@ -3141,6 +3159,11 @@
 	filename := createTmpFile(c, "test123")
 	defer os.Remove(filename)
 
+	// for user namespaced test runs, the temp file must be accessible to unprivileged root
+	if err := os.Chmod(filename, 0646); err != nil {
+		c.Fatalf("error modifying permissions of %s: %v", filename, err)
+	}
+
 	nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"}
 
 	for i := range nwfiles {
@@ -3963,32 +3986,61 @@
 	dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true")
 	dockerCmd(c, "volume", "inspect", "test")
 	out, _ := dockerCmd(c, "volume", "ls", "-q")
-	c.Assert(strings.TrimSpace(out), checker.Equals, "test")
+	c.Assert(strings.TrimSpace(out), checker.Contains, "test")
 
 	dockerCmd(c, "run", "--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true")
 	dockerCmd(c, "rm", "-fv", "test")
 	dockerCmd(c, "volume", "inspect", "test")
 	out, _ = dockerCmd(c, "volume", "ls", "-q")
-	c.Assert(strings.TrimSpace(out), checker.Equals, "test")
+	c.Assert(strings.TrimSpace(out), checker.Contains, "test")
 }
 
 func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) {
 	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
 
 	dockerCmd(c, "volume", "create", "test")
-	dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true")
+	cid, _ := dockerCmd(c, "run", "-d", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true")
 	dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true")
 
+	cli, err := client.NewEnvClient()
+	c.Assert(err, checker.IsNil)
+	defer cli.Close()
+
+	container, err := cli.ContainerInspect(context.Background(), strings.TrimSpace(cid))
+	c.Assert(err, checker.IsNil)
+	var vname string
+	for _, v := range container.Mounts {
+		if v.Name != "test" {
+			vname = v.Name
+		}
+	}
+	c.Assert(vname, checker.Not(checker.Equals), "")
+
 	// Remove the parent so there are not other references to the volumes
 	dockerCmd(c, "rm", "-f", "parent")
 	// now remove the child and ensure the named volume (and only the named volume) still exists
 	dockerCmd(c, "rm", "-fv", "child")
 	dockerCmd(c, "volume", "inspect", "test")
 	out, _ := dockerCmd(c, "volume", "ls", "-q")
-	c.Assert(strings.TrimSpace(out), checker.Equals, "test")
+	c.Assert(strings.TrimSpace(out), checker.Contains, "test")
+	c.Assert(strings.TrimSpace(out), checker.Not(checker.Contains), vname)
 }
 
 func (s *DockerSuite) TestRunAttachFailedNoLeak(c *check.C) {
+	// TODO @msabansal - https://github.com/moby/moby/issues/35023. Duplicate
+	// port mappings are not errored out on RS3 builds. Temporarily disabling
+	// this test pending further investigation. Note we parse kernel.GetKernelVersion
+	// rather than system.GetOSVersion as test binaries aren't manifested, so would
+	// otherwise report build 9200.
+	if runtime.GOOS == "windows" {
+		v, err := kernel.GetKernelVersion()
+		c.Assert(err, checker.IsNil)
+		build, _ := strconv.Atoi(strings.Split(strings.SplitN(v.String(), " ", 3)[2][1:], ".")[0])
+		if build >= 16292 { // @jhowardmsft TODO - replace with final RS3 build and ==
+			c.Skip("Temporarily disabled on RS3 builds")
+		}
+	}
+
 	nroutines, err := getGoroutineNumber()
 	c.Assert(err, checker.IsNil)
 
@@ -4123,7 +4175,7 @@
 // Test that auto-remove is performed by the client on API versions that do not support daemon-side api-remove (API < 1.25)
 func (s *DockerSuite) TestRunRmPre125Api(c *check.C) {
 	name := "miss-me-when-im-gone"
-	envs := appendBaseEnv(false, "DOCKER_API_VERSION=1.24")
+	envs := appendBaseEnv(os.Getenv("DOCKER_TLS_VERIFY") != "", "DOCKER_API_VERSION=1.24")
 	cli.Docker(cli.Args("run", "--name="+name, "--rm", "busybox"), cli.WithEnvironmentVariables(envs...)).Assert(c, icmd.Success)
 
 	cli.Docker(cli.Inspect(name), cli.Format(".name")).Assert(c, icmd.Expected{
diff --git a/integration-cli/docker_cli_run_unix_test.go b/integration-cli/docker_cli_run_unix_test.go
index 2176f0b..952cf0a 100644
--- a/integration-cli/docker_cli_run_unix_test.go
+++ b/integration-cli/docker_cli_run_unix_test.go
@@ -226,6 +226,7 @@
 	if err := cmd.Start(); err != nil {
 		c.Fatal(err)
 	}
+	go cmd.Wait()
 
 	bufReader := bufio.NewReader(stdout)
 	out, err := bufReader.ReadString('\n')
@@ -424,6 +425,7 @@
 	if err := cmd.Start(); err != nil {
 		c.Fatal(err)
 	}
+	go cmd.Wait()
 	c.Assert(waitRun(name), check.IsNil)
 
 	// Invalid escape sequence aba, should print aba in output
@@ -677,7 +679,7 @@
 }
 
 func (s *DockerSuite) TestRunWithMemoryReservation(c *check.C) {
-	testRequires(c, memoryReservationSupport)
+	testRequires(c, SameHostDaemon, memoryReservationSupport)
 
 	file := "/sys/fs/cgroup/memory/memory.soft_limit_in_bytes"
 	out, _ := dockerCmd(c, "run", "--memory-reservation", "200M", "--name", "test", "busybox", "cat", file)
@@ -689,7 +691,7 @@
 
 func (s *DockerSuite) TestRunWithMemoryReservationInvalid(c *check.C) {
 	testRequires(c, memoryLimitSupport)
-	testRequires(c, memoryReservationSupport)
+	testRequires(c, SameHostDaemon, memoryReservationSupport)
 	out, _, err := dockerCmdWithError("run", "-m", "500M", "--memory-reservation", "800M", "busybox", "true")
 	c.Assert(err, check.NotNil)
 	expected := "Minimum memory limit can not be less than memory reservation limit"
@@ -1060,7 +1062,7 @@
 	testRequires(c, SameHostDaemon, seccompEnabled, IsAmd64)
 	ensureSyscallTest(c)
 
-	icmd.RunCommand(dockerBinary, "run", "syscall-test", "exit32-test", "id").Assert(c, icmd.Success)
+	icmd.RunCommand(dockerBinary, "run", "syscall-test", "exit32-test").Assert(c, icmd.Success)
 }
 
 // TestRunSeccompAllowSetrlimit checks that 'docker run debian:jessie ulimit -v 1048510' succeeds.
@@ -1401,7 +1403,7 @@
 
 // TestRunPIDsLimit makes sure the pids cgroup is set with --pids-limit
 func (s *DockerSuite) TestRunPIDsLimit(c *check.C) {
-	testRequires(c, pidsLimit)
+	testRequires(c, SameHostDaemon, pidsLimit)
 
 	file := "/sys/fs/cgroup/pids/pids.max"
 	out, _ := dockerCmd(c, "run", "--name", "skittles", "--pids-limit", "4", "busybox", "cat", file)
diff --git a/integration-cli/docker_cli_service_create_test.go b/integration-cli/docker_cli_service_create_test.go
index 6fc92c2..ac7eff7 100644
--- a/integration-cli/docker_cli_service_create_test.go
+++ b/integration-cli/docker_cli_service_create_test.go
@@ -76,7 +76,7 @@
 	})
 	c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
 
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--secret", testName, "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--secret", testName, "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName)
@@ -122,7 +122,7 @@
 	}
 
 	serviceName := "svc"
-	serviceCmd := []string{"service", "create", "--no-resolve-image", "--name", serviceName}
+	serviceCmd := []string{"service", "create", "--detach", "--no-resolve-image", "--name", serviceName}
 	serviceCmd = append(serviceCmd, secretFlags...)
 	serviceCmd = append(serviceCmd, "busybox", "top")
 	out, err := d.Cmd(serviceCmd...)
@@ -175,7 +175,7 @@
 	c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("secrets: %s", id))
 
 	serviceName := "svc"
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--secret", "source=mysecret,target=target1", "--secret", "source=mysecret,target=target2", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--secret", "source=mysecret,target=target1", "--secret", "source=mysecret,target=target2", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName)
@@ -224,7 +224,7 @@
 	})
 	c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id))
 
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--config", testName, "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--config", testName, "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName)
@@ -269,7 +269,7 @@
 	}
 
 	serviceName := "svc"
-	serviceCmd := []string{"service", "create", "--no-resolve-image", "--name", serviceName}
+	serviceCmd := []string{"service", "create", "--detach", "--no-resolve-image", "--name", serviceName}
 	serviceCmd = append(serviceCmd, configFlags...)
 	serviceCmd = append(serviceCmd, "busybox", "top")
 	out, err := d.Cmd(serviceCmd...)
@@ -322,7 +322,7 @@
 	c.Assert(id, checker.Not(checker.Equals), "", check.Commentf("configs: %s", id))
 
 	serviceName := "svc"
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--config", "source=myconfig,target=target1", "--config", "source=myconfig,target=target2", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--config", "source=myconfig,target=target1", "--config", "source=myconfig,target=target2", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName)
diff --git a/integration-cli/docker_cli_service_logs_test.go b/integration-cli/docker_cli_service_logs_test.go
index 1c8f3d8..b3f5827 100644
--- a/integration-cli/docker_cli_service_logs_test.go
+++ b/integration-cli/docker_cli_service_logs_test.go
@@ -31,7 +31,7 @@
 	}
 
 	for name, message := range services {
-		out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox",
+		out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox",
 			"sh", "-c", fmt.Sprintf("echo %s; tail -f /dev/null", message))
 		c.Assert(err, checker.IsNil)
 		c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
@@ -74,7 +74,7 @@
 	name := "TestServiceLogsCompleteness"
 
 	// make a service that prints 6 lines
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for line in $(seq 0 5); do echo log test $line; done; sleep 100000")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for line in $(seq 0 5); do echo log test $line; done; sleep 100000")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
@@ -101,7 +101,7 @@
 	name := "TestServiceLogsTail"
 
 	// make a service that prints 6 lines
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for line in $(seq 1 6); do echo log test $line; done; sleep 100000")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for line in $(seq 1 6); do echo log test $line; done; sleep 100000")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
@@ -125,7 +125,7 @@
 
 	name := "TestServiceLogsSince"
 
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for i in $(seq 1 3); do sleep .1; echo log$i; done; sleep 10000000")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "for i in $(seq 1 3); do sleep .1; echo log$i; done; sleep 10000000")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
@@ -159,7 +159,7 @@
 
 	name := "TestServiceLogsFollow"
 
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "while true; do echo log test; sleep 0.1; done")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", "while true; do echo log test; sleep 0.1; done")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
@@ -172,6 +172,7 @@
 	cmd.Stdout = w
 	cmd.Stderr = w
 	c.Assert(cmd.Start(), checker.IsNil)
+	go cmd.Wait()
 
 	// Make sure pipe is written to
 	ch := make(chan *logMessage)
@@ -207,7 +208,7 @@
 
 	result := icmd.RunCmd(d.Command(
 		// create a service with the name
-		"service", "create", "--no-resolve-image", "--name", name,
+		"service", "create", "--detach", "--no-resolve-image", "--name", name,
 		// which has some number of replicas
 		fmt.Sprintf("--replicas=%v", replicas),
 		// which has this the task id as an environment variable templated in
@@ -259,7 +260,7 @@
 
 	result := icmd.RunCmd(d.Command(
 		// create a service
-		"service", "create", "--no-resolve-image",
+		"service", "create", "--detach", "--no-resolve-image",
 		// name it $name
 		"--name", name,
 		// use a TTY
@@ -297,7 +298,7 @@
 
 	result := icmd.RunCmd(d.Command(
 		// create a service
-		"service", "create", "--no-resolve-image",
+		"service", "create", "--detach", "--no-resolve-image",
 		// name it $name
 		"--name", name,
 		// busybox image, shell string
@@ -346,7 +347,7 @@
 
 	result := icmd.RunCmd(d.Command(
 		// create a service
-		"service", "create", "--no-resolve-image",
+		"service", "create", "--detach", "--no-resolve-image",
 		// name it $name
 		"--name", name,
 		// add an environment variable
diff --git a/integration-cli/docker_cli_service_scale_test.go b/integration-cli/docker_cli_service_scale_test.go
index 8fb84fe..41b49d6 100644
--- a/integration-cli/docker_cli_service_scale_test.go
+++ b/integration-cli/docker_cli_service_scale_test.go
@@ -14,11 +14,11 @@
 	d := s.AddDaemon(c, true, true)
 
 	service1Name := "TestService1"
-	service1Args := append([]string{"service", "create", "--no-resolve-image", "--name", service1Name, defaultSleepImage}, sleepCommandForDaemonPlatform()...)
+	service1Args := append([]string{"service", "create", "--detach", "--no-resolve-image", "--name", service1Name, defaultSleepImage}, sleepCommandForDaemonPlatform()...)
 
 	// global mode
 	service2Name := "TestService2"
-	service2Args := append([]string{"service", "create", "--no-resolve-image", "--name", service2Name, "--mode=global", defaultSleepImage}, sleepCommandForDaemonPlatform()...)
+	service2Args := append([]string{"service", "create", "--detach", "--no-resolve-image", "--name", service2Name, "--mode=global", defaultSleepImage}, sleepCommandForDaemonPlatform()...)
 
 	// Create services
 	out, err := d.Cmd(service1Args...)
diff --git a/integration-cli/docker_cli_service_update_test.go b/integration-cli/docker_cli_service_update_test.go
index 086ae77..8f07170 100644
--- a/integration-cli/docker_cli_service_update_test.go
+++ b/integration-cli/docker_cli_service_update_test.go
@@ -15,7 +15,7 @@
 	d := s.AddDaemon(c, true, true)
 
 	serviceName := "TestServiceUpdatePort"
-	serviceArgs := append([]string{"service", "create", "--no-resolve-image", "--name", serviceName, "-p", "8080:8081", defaultSleepImage}, sleepCommandForDaemonPlatform()...)
+	serviceArgs := append([]string{"service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "-p", "8080:8081", defaultSleepImage}, sleepCommandForDaemonPlatform()...)
 
 	// Create a service with a port mapping of 8080:8081.
 	out, err := d.Cmd(serviceArgs...)
@@ -23,7 +23,7 @@
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
 
 	// Update the service: changed the port mapping from 8080:8081 to 8082:8083.
-	_, err = d.Cmd("service", "update", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName)
+	_, err = d.Cmd("service", "update", "--detach", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName)
 	c.Assert(err, checker.IsNil)
 
 	// Inspect the service and verify port mapping
@@ -48,39 +48,39 @@
 
 func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) {
 	d := s.AddDaemon(c, true, true)
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name=test", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name=test", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	service := d.GetService(c, "test")
 	c.Assert(service.Spec.Labels, checker.HasLen, 0)
 
 	// add label to empty set
-	out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar")
+	out, err = d.Cmd("service", "update", "--detach", "test", "--label-add", "foo=bar")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	service = d.GetService(c, "test")
 	c.Assert(service.Spec.Labels, checker.HasLen, 1)
 	c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar")
 
 	// add label to non-empty set
-	out, err = d.Cmd("service", "update", "test", "--label-add", "foo2=bar")
+	out, err = d.Cmd("service", "update", "--detach", "test", "--label-add", "foo2=bar")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	service = d.GetService(c, "test")
 	c.Assert(service.Spec.Labels, checker.HasLen, 2)
 	c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar")
 
-	out, err = d.Cmd("service", "update", "test", "--label-rm", "foo2")
+	out, err = d.Cmd("service", "update", "--detach", "test", "--label-rm", "foo2")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	service = d.GetService(c, "test")
 	c.Assert(service.Spec.Labels, checker.HasLen, 1)
 	c.Assert(service.Spec.Labels["foo2"], checker.Equals, "")
 
-	out, err = d.Cmd("service", "update", "test", "--label-rm", "foo")
+	out, err = d.Cmd("service", "update", "--detach", "test", "--label-rm", "foo")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	service = d.GetService(c, "test")
 	c.Assert(service.Spec.Labels, checker.HasLen, 0)
 	c.Assert(service.Spec.Labels["foo"], checker.Equals, "")
 
 	// now make sure we can add again
-	out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar")
+	out, err = d.Cmd("service", "update", "--detach", "test", "--label-add", "foo=bar")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	service = d.GetService(c, "test")
 	c.Assert(service.Spec.Labels, checker.HasLen, 1)
@@ -100,11 +100,11 @@
 	testTarget := "testing"
 	serviceName := "test"
 
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	// add secret
-	out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget))
+	out, err = d.CmdRetryOutOfSequence("service", "update", "--detach", "test", "--secret-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget))
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName)
@@ -119,7 +119,7 @@
 	c.Assert(refs[0].File.Name, checker.Equals, testTarget)
 
 	// remove
-	out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--secret-rm", testName)
+	out, err = d.CmdRetryOutOfSequence("service", "update", "--detach", "test", "--secret-rm", testName)
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Secrets }}", serviceName)
@@ -142,11 +142,11 @@
 	testTarget := "/testing"
 	serviceName := "test"
 
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	// add config
-	out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--config-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget))
+	out, err = d.CmdRetryOutOfSequence("service", "update", "--detach", "test", "--config-add", fmt.Sprintf("source=%s,target=%s", testName, testTarget))
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName)
@@ -161,7 +161,7 @@
 	c.Assert(refs[0].File.Name, checker.Equals, testTarget)
 
 	// remove
-	out, err = d.CmdRetryOutOfSequence("service", "update", "test", "--config-rm", testName)
+	out, err = d.CmdRetryOutOfSequence("service", "update", "--detach", "test", "--config-rm", testName)
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.TaskTemplate.ContainerSpec.Configs }}", serviceName)
diff --git a/integration-cli/docker_cli_stats_test.go b/integration-cli/docker_cli_stats_test.go
index 9d40ce0..4548363 100644
--- a/integration-cli/docker_cli_stats_test.go
+++ b/integration-cli/docker_cli_stats_test.go
@@ -34,7 +34,7 @@
 	select {
 	case outerr := <-ch:
 		c.Assert(outerr.err, checker.IsNil, check.Commentf("Error running stats: %v", outerr.err))
-		c.Assert(string(outerr.out), checker.Contains, id) //running container wasn't present in output
+		c.Assert(string(outerr.out), checker.Contains, id[:12]) //running container wasn't present in output
 	case <-time.After(3 * time.Second):
 		statsCmd.Process.Kill()
 		c.Fatalf("stats did not return immediately when not streaming")
@@ -131,6 +131,7 @@
 	stdout, err := statsCmd.StdoutPipe()
 	c.Assert(err, check.IsNil)
 	c.Assert(statsCmd.Start(), check.IsNil)
+	go statsCmd.Wait()
 	defer statsCmd.Process.Kill()
 
 	go func() {
diff --git a/integration-cli/docker_cli_swarm_test.go b/integration-cli/docker_cli_swarm_test.go
index 5ecb010..9488fff 100644
--- a/integration-cli/docker_cli_swarm_test.go
+++ b/integration-cli/docker_cli_swarm_test.go
@@ -169,8 +169,10 @@
 
 func (s *DockerSwarmSuite) TestSwarmServiceTemplatingHostname(c *check.C) {
 	d := s.AddDaemon(c, true, true)
+	hostname, err := d.Cmd("node", "inspect", "--format", "{{.Description.Hostname}}", "self")
+	c.Assert(err, checker.IsNil, check.Commentf(hostname))
 
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "test", "--hostname", "{{.Service.Name}}-{{.Task.Slot}}-{{.Node.Hostname}}", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	// make sure task has been deployed.
@@ -179,7 +181,7 @@
 	containers := d.ActiveContainers()
 	out, err = d.Cmd("inspect", "--type", "container", "--format", "{{.Config.Hostname}}", containers[0])
 	c.Assert(err, checker.IsNil, check.Commentf(out))
-	c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1", check.Commentf("hostname with templating invalid"))
+	c.Assert(strings.Split(out, "\n")[0], checker.Equals, "test-1-"+strings.Split(hostname, "\n")[0], check.Commentf("hostname with templating invalid"))
 }
 
 // Test case for #24270
@@ -189,15 +191,15 @@
 	name1 := "redis-cluster-md5"
 	name2 := "redis-cluster"
 	name3 := "other-cluster"
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name1, "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name1, "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
-	out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name2, "busybox", "top")
+	out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name2, "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
-	out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name3, "busybox", "top")
+	out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name3, "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
@@ -247,7 +249,7 @@
 	d := s.AddDaemon(c, true, true)
 
 	name := "redis-cluster-md5"
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
@@ -274,17 +276,17 @@
 	d := s.AddDaemon(c, true, true)
 
 	name := "top"
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--label", "x=y", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--label", "x=y", "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
-	out, err = d.Cmd("service", "update", "--publish-add", "80:80", name)
+	out, err = d.Cmd("service", "update", "--detach", "--publish-add", "80:80", name)
 	c.Assert(err, checker.IsNil)
 
-	out, err = d.CmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", name)
+	out, err = d.CmdRetryOutOfSequence("service", "update", "--detach", "--publish-add", "80:80", name)
 	c.Assert(err, checker.IsNil)
 
-	out, err = d.CmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", "--publish-add", "80:20", name)
+	out, err = d.CmdRetryOutOfSequence("service", "update", "--detach", "--publish-add", "80:80", "--publish-add", "80:20", name)
 	c.Assert(err, checker.NotNil)
 
 	out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.EndpointSpec.Ports }}", name)
@@ -296,7 +298,7 @@
 	d := s.AddDaemon(c, true, true)
 
 	name := "top"
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--user", "root:root", "--group", "wheel", "--group", "audio", "--group", "staff", "--group", "777", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--user", "root:root", "--group", "wheel", "--group", "audio", "--group", "staff", "--group", "777", "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
@@ -485,7 +487,7 @@
 	c.Assert(strings.TrimSpace(out), checker.Contains, "is already present")
 
 	// It cannot be removed if it is being used
-	out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv1", "-p", "9000:8000", "busybox", "top")
+	out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "srv1", "-p", "9000:8000", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	result = removeNetwork("new-ingress")
@@ -495,24 +497,24 @@
 	})
 
 	// But it can be removed once no more services depend on it
-	out, err = d.Cmd("service", "update", "--publish-rm", "9000:8000", "srv1")
+	out, err = d.Cmd("service", "update", "--detach", "--publish-rm", "9000:8000", "srv1")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	result = removeNetwork("new-ingress")
 	result.Assert(c, icmd.Success)
 
 	// A service which needs the ingress network cannot be created if no ingress is present
-	out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv2", "-p", "500:500", "busybox", "top")
+	out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "srv2", "-p", "500:500", "busybox", "top")
 	c.Assert(err, checker.NotNil)
 	c.Assert(strings.TrimSpace(out), checker.Contains, "no ingress network is present")
 
 	// An existing service cannot be updated to use the ingress nw if the nw is not present
-	out, err = d.Cmd("service", "update", "--publish-add", "9000:8000", "srv1")
+	out, err = d.Cmd("service", "update", "--detach", "--publish-add", "9000:8000", "srv1")
 	c.Assert(err, checker.NotNil)
 	c.Assert(strings.TrimSpace(out), checker.Contains, "no ingress network is present")
 
 	// But services which do not need routing mesh can be created regardless
-	out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv3", "--endpoint-mode", "dnsrr", "busybox", "top")
+	out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "srv3", "--endpoint-mode", "dnsrr", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 }
 
@@ -529,7 +531,7 @@
 	// Make sure nothing panics because ingress network is missing
 	out, err := d.Cmd("network", "create", "-d", "overlay", "another-network")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
-	out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "srv4", "--network", "another-network", "busybox", "top")
+	out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "srv4", "--network", "another-network", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 }
 
@@ -539,7 +541,7 @@
 	d := s.AddDaemon(c, true, true)
 
 	name := "redis-cluster-md5"
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--replicas=3", "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
@@ -573,7 +575,7 @@
 	c.Assert(out, checker.Not(checker.Contains), name+".3")
 
 	name = "redis-cluster-sha1"
-	out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--mode=global", "busybox", "top")
+	out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--mode=global", "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
@@ -602,7 +604,7 @@
 	bareID := strings.TrimSpace(out)[:12]
 	// Create a service
 	name := "busybox-top"
-	out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "top")
+	out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
@@ -821,7 +823,7 @@
 	c.Assert(err, checker.IsNil)
 
 	name := "worker"
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--env-file", path, "--env", "VAR1=B", "--env", "VAR1=C", "--env", "VAR2=", "--env", "VAR2", "--name", name, "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--env-file", path, "--env", "VAR1=B", "--env", "VAR1=C", "--env", "VAR2=", "--env", "VAR2", "--name", name, "busybox", "top")
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
@@ -840,7 +842,7 @@
 
 	// Without --tty
 	expectedOutput := "none"
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", ttyCheck)
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "sh", "-c", ttyCheck)
 	c.Assert(err, checker.IsNil)
 
 	// Make sure task has been deployed.
@@ -863,7 +865,7 @@
 
 	// With --tty
 	expectedOutput = "TTY"
-	out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--tty", "busybox", "sh", "-c", ttyCheck)
+	out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--tty", "busybox", "sh", "-c", ttyCheck)
 	c.Assert(err, checker.IsNil)
 
 	// Make sure task has been deployed.
@@ -884,7 +886,7 @@
 
 	// Create a service
 	name := "top"
-	_, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "top")
+	_, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "top")
 	c.Assert(err, checker.IsNil)
 
 	// Make sure task has been deployed.
@@ -894,7 +896,7 @@
 	c.Assert(err, checker.IsNil)
 	c.Assert(strings.TrimSpace(out), checker.Equals, "false")
 
-	_, err = d.Cmd("service", "update", "--tty", name)
+	_, err = d.Cmd("service", "update", "--detach", "--tty", name)
 	c.Assert(err, checker.IsNil)
 
 	out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.TTY }}", name)
@@ -919,7 +921,7 @@
 
 	// Create a service
 	name := "top"
-	result = icmd.RunCmd(d.Command("service", "create", "--no-resolve-image", "--network", "foo", "--network", "bar", "--name", name, "busybox", "top"))
+	result = icmd.RunCmd(d.Command("service", "create", "--detach", "--no-resolve-image", "--network", "foo", "--network", "bar", "--name", name, "busybox", "top"))
 	result.Assert(c, icmd.Success)
 
 	// Make sure task has been deployed.
@@ -927,14 +929,14 @@
 		map[string]int{fooNetwork: 1, barNetwork: 1})
 
 	// Remove a network
-	result = icmd.RunCmd(d.Command("service", "update", "--network-rm", "foo", name))
+	result = icmd.RunCmd(d.Command("service", "update", "--detach", "--network-rm", "foo", name))
 	result.Assert(c, icmd.Success)
 
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskNetworks, checker.DeepEquals,
 		map[string]int{barNetwork: 1})
 
 	// Add a network
-	result = icmd.RunCmd(d.Command("service", "update", "--network-add", "baz", name))
+	result = icmd.RunCmd(d.Command("service", "update", "--detach", "--network-add", "baz", name))
 	result.Assert(c, icmd.Success)
 
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckRunningTaskNetworks, checker.DeepEquals,
@@ -946,7 +948,7 @@
 
 	// Create a service
 	name := "top"
-	_, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--dns=1.2.3.4", "--dns-search=example.com", "--dns-option=timeout:3", "busybox", "top")
+	_, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--dns=1.2.3.4", "--dns-search=example.com", "--dns-option=timeout:3", "busybox", "top")
 	c.Assert(err, checker.IsNil)
 
 	// Make sure task has been deployed.
@@ -973,13 +975,13 @@
 
 	// Create a service
 	name := "top"
-	_, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "busybox", "top")
+	_, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "busybox", "top")
 	c.Assert(err, checker.IsNil)
 
 	// Make sure task has been deployed.
 	waitAndAssert(c, defaultReconciliationTimeout, d.CheckActiveContainerCount, checker.Equals, 1)
 
-	_, err = d.Cmd("service", "update", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name)
+	_, err = d.Cmd("service", "update", "--detach", "--dns-add=1.2.3.4", "--dns-search-add=example.com", "--dns-option-add=timeout:3", name)
 	c.Assert(err, checker.IsNil)
 
 	out, err := d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.DNSConfig }}", name)
@@ -1495,7 +1497,7 @@
 
 	// Create a service
 	name := "top"
-	_, err := d.Cmd("service", "create", "--no-resolve-image", "--name", name, "--host=example.com:1.2.3.4", "busybox", "top")
+	_, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", name, "--host=example.com:1.2.3.4", "busybox", "top")
 	c.Assert(err, checker.IsNil)
 
 	// Make sure task has been deployed.
@@ -1543,9 +1545,10 @@
 
 	out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
-	c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]")
+	c.Assert(strings.TrimSpace(out), checker.Contains, "foo:bar")
+	c.Assert(strings.TrimSpace(out), checker.Contains, "com.docker.network.ipam.serial:true")
 
-	out, err = d.Cmd("service", "create", "--no-resolve-image", "--network=foo", "--name", "top", "busybox", "top")
+	out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--network=foo", "--name", "top", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	// make sure task has been deployed.
@@ -1553,7 +1556,8 @@
 
 	out, err = d.Cmd("network", "inspect", "--format", "{{.IPAM.Options}}", "foo")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
-	c.Assert(strings.TrimSpace(out), checker.Equals, "map[foo:bar]")
+	c.Assert(strings.TrimSpace(out), checker.Contains, "foo:bar")
+	c.Assert(strings.TrimSpace(out), checker.Contains, "com.docker.network.ipam.serial:true")
 }
 
 func (s *DockerTrustedSwarmSuite) TestTrustedServiceCreate(c *check.C) {
@@ -1563,7 +1567,7 @@
 	repoName := s.trustSuite.setupTrustedImage(c, "trusted-pull")
 
 	name := "trusted"
-	cli.Docker(cli.Args("-D", "service", "create", "--no-resolve-image", "--name", name, repoName, "top"), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{
+	cli.Docker(cli.Args("-D", "service", "create", "--detach", "--no-resolve-image", "--name", name, repoName, "top"), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{
 		Err: "resolved image tag to",
 	})
 
@@ -1580,7 +1584,7 @@
 	cli.DockerCmd(c, "rmi", repoName)
 
 	name = "untrusted"
-	cli.Docker(cli.Args("service", "create", "--no-resolve-image", "--name", name, repoName, "top"), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{
+	cli.Docker(cli.Args("service", "create", "--detach", "--no-resolve-image", "--name", name, repoName, "top"), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{
 		ExitCode: 1,
 		Err:      "Error: remote trust data does not exist",
 	})
@@ -1598,7 +1602,7 @@
 	name := "myservice"
 
 	// Create a service without content trust
-	cli.Docker(cli.Args("service", "create", "--no-resolve-image", "--name", name, repoName, "top"), cli.Daemon(d.Daemon)).Assert(c, icmd.Success)
+	cli.Docker(cli.Args("service", "create", "--detach", "--no-resolve-image", "--name", name, repoName, "top"), cli.Daemon(d.Daemon)).Assert(c, icmd.Success)
 
 	result := cli.Docker(cli.Args("service", "inspect", "--pretty", name), cli.Daemon(d.Daemon))
 	c.Assert(result.Error, checker.IsNil, check.Commentf(result.Combined()))
@@ -1606,7 +1610,7 @@
 	// DOCKER_SERVICE_PREFER_OFFLINE_IMAGE.
 	c.Assert(result.Combined(), check.Not(checker.Contains), repoName+"@", check.Commentf(result.Combined()))
 
-	cli.Docker(cli.Args("-D", "service", "update", "--no-resolve-image", "--image", repoName, name), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{
+	cli.Docker(cli.Args("-D", "service", "update", "--detach", "--no-resolve-image", "--image", repoName, name), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{
 		Err: "resolved image tag to",
 	})
 
@@ -1622,7 +1626,7 @@
 	cli.DockerCmd(c, "push", repoName)
 	cli.DockerCmd(c, "rmi", repoName)
 
-	cli.Docker(cli.Args("service", "update", "--no-resolve-image", "--image", repoName, name), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{
+	cli.Docker(cli.Args("service", "update", "--detach", "--no-resolve-image", "--image", repoName, name), trustedCmd, cli.Daemon(d.Daemon)).Assert(c, icmd.Expected{
 		ExitCode: 1,
 		Err:      "Error: remote trust data does not exist",
 	})
@@ -1738,7 +1742,7 @@
 
 	d := s.AddDaemon(c, true, true)
 
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "top", "--read-only", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "top", "--read-only", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	// make sure task has been deployed.
@@ -1825,7 +1829,7 @@
 
 	d := s.AddDaemon(c, true, true)
 
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "top", "--stop-signal=SIGHUP", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "top", "--stop-signal=SIGHUP", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	// make sure task has been deployed.
@@ -1840,7 +1844,7 @@
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	c.Assert(strings.TrimSpace(out), checker.Equals, "SIGHUP")
 
-	out, err = d.Cmd("service", "update", "--stop-signal=SIGUSR1", "top")
+	out, err = d.Cmd("service", "update", "--detach", "--stop-signal=SIGUSR1", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.TaskTemplate.ContainerSpec.StopSignal }}", "top")
@@ -1851,11 +1855,11 @@
 func (s *DockerSwarmSuite) TestSwarmServiceLsFilterMode(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--name", "top1", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "top1", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
-	out, err = d.Cmd("service", "create", "--no-resolve-image", "--name", "top2", "--mode=global", "busybox", "top")
+	out, err = d.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", "top2", "--mode=global", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 	c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "")
 
diff --git a/integration-cli/docker_cli_swarm_unix_test.go b/integration-cli/docker_cli_swarm_unix_test.go
index 91ddd5f..3b890bc 100644
--- a/integration-cli/docker_cli_swarm_unix_test.go
+++ b/integration-cli/docker_cli_swarm_unix_test.go
@@ -15,11 +15,11 @@
 func (s *DockerSwarmSuite) TestSwarmVolumePlugin(c *check.C) {
 	d := s.AddDaemon(c, true, true)
 
-	out, err := d.Cmd("service", "create", "--no-resolve-image", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top")
+	out, err := d.Cmd("service", "create", "--detach", "--no-resolve-image", "--mount", "type=volume,source=my-volume,destination=/foo,volume-driver=customvolumedriver", "--name", "top", "busybox", "top")
 	c.Assert(err, checker.IsNil, check.Commentf(out))
 
 	// Make sure task stays pending before plugin is available
-	waitAndAssert(c, defaultReconciliationTimeout, d.CheckServiceTasksInState("top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals, 1)
+	waitAndAssert(c, defaultReconciliationTimeout, d.CheckServiceTasksInStateWithError("top", swarm.TaskStatePending, "missing plugin on 1 node"), checker.Equals, 1)
 
 	plugin := newVolumePlugin(c, "customvolumedriver")
 	defer plugin.Close()
@@ -74,7 +74,7 @@
 
 	// create a global service to ensure that both nodes will have an instance
 	serviceName := "my-service"
-	_, err = d1.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, "busybox", "top")
+	_, err = d1.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, "busybox", "top")
 	c.Assert(err, checker.IsNil)
 
 	// wait for tasks ready
@@ -96,7 +96,7 @@
 
 	image := "busybox:latest"
 	// create a new global service again.
-	_, err = d1.Cmd("service", "create", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, image, "top")
+	_, err = d1.Cmd("service", "create", "--detach", "--no-resolve-image", "--name", serviceName, "--mode=global", "--network", networkName, image, "top")
 	c.Assert(err, checker.IsNil)
 
 	waitAndAssert(c, defaultReconciliationTimeout, d1.CheckRunningTaskImages, checker.DeepEquals,
diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go
index 907977f..ee94a9b 100644
--- a/integration-cli/docker_cli_tag_test.go
+++ b/integration-cli/docker_cli_tag_test.go
@@ -6,8 +6,8 @@
 
 	"github.com/docker/docker/integration-cli/checker"
 	"github.com/docker/docker/integration-cli/cli/build"
+	"github.com/docker/docker/internal/testutil"
 	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/docker/pkg/stringutils"
 	"github.com/go-check/check"
 )
 
@@ -34,7 +34,7 @@
 
 // ensure we don't allow the use of invalid tags; these tag operations should fail
 func (s *DockerSuite) TestTagInvalidPrefixedRepo(c *check.C) {
-	longTag := stringutils.GenerateRandomAlphaOnlyString(121)
+	longTag := testutil.GenerateRandomAlphaOnlyString(121)
 
 	invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag}
 
diff --git a/integration-cli/docker_cli_update_unix_test.go b/integration-cli/docker_cli_update_unix_test.go
index 2b75941..0d2d30f 100644
--- a/integration-cli/docker_cli_update_unix_test.go
+++ b/integration-cli/docker_cli_update_unix_test.go
@@ -139,7 +139,7 @@
 func (s *DockerSuite) TestUpdateKernelMemoryUninitialized(c *check.C) {
 	testRequires(c, DaemonIsLinux, kernelMemorySupport)
 
-	isNewKernel := kernel.CheckKernelVersion(4, 6, 0)
+	isNewKernel := CheckKernelVersion(4, 6, 0)
 	name := "test-update-container"
 	dockerCmd(c, "run", "-d", "--name", name, "busybox", "top")
 	_, _, err := dockerCmdWithError("update", "--kernel-memory", "100M", name)
@@ -171,6 +171,18 @@
 	c.Assert(strings.TrimSpace(out), checker.Equals, "314572800")
 }
 
+// GetKernelVersion gets the current kernel version.
+func GetKernelVersion() *kernel.VersionInfo {
+	v, _ := kernel.ParseRelease(testEnv.DaemonInfo.KernelVersion)
+	return v
+}
+
+// CheckKernelVersion checks if current kernel is newer than (or equal to)
+// the given version.
+func CheckKernelVersion(k, major, minor int) bool {
+	return kernel.CompareKernelVersion(*GetKernelVersion(), kernel.VersionInfo{Kernel: k, Major: major, Minor: minor}) >= 0
+}
+
 func (s *DockerSuite) TestUpdateSwapMemoryOnly(c *check.C) {
 	testRequires(c, DaemonIsLinux)
 	testRequires(c, memoryLimitSupport)
diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go
deleted file mode 100644
index 074a7db..0000000
--- a/integration-cli/docker_cli_version_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package main
-
-import (
-	"strings"
-
-	"github.com/docker/docker/integration-cli/checker"
-	"github.com/go-check/check"
-)
-
-// ensure docker version works
-func (s *DockerSuite) TestVersionEnsureSucceeds(c *check.C) {
-	out, _ := dockerCmd(c, "version")
-	stringsToCheck := map[string]int{
-		"Client:":       1,
-		"Server:":       1,
-		" Version:":     2,
-		" API version:": 2,
-		" Go version:":  2,
-		" Git commit:":  2,
-		" OS/Arch:":     2,
-		" Built:":       2,
-	}
-
-	for k, v := range stringsToCheck {
-		c.Assert(strings.Count(out, k), checker.Equals, v, check.Commentf("The count of %v in %s does not match excepted", k, out))
-	}
-}
-
-// ensure the Windows daemon return the correct platform string
-func (s *DockerSuite) TestVersionPlatform_w(c *check.C) {
-	testRequires(c, DaemonIsWindows)
-	testVersionPlatform(c, "windows/amd64")
-}
-
-// ensure the Linux daemon return the correct platform string
-func (s *DockerSuite) TestVersionPlatform_l(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-	testVersionPlatform(c, "linux")
-}
-
-func testVersionPlatform(c *check.C, platform string) {
-	out, _ := dockerCmd(c, "version")
-	expected := "OS/Arch:      " + platform
-
-	split := strings.Split(out, "\n")
-	c.Assert(len(split) >= 14, checker.Equals, true, check.Commentf("got %d lines from version", len(split)))
-
-	// Verify the second 'OS/Arch' matches the platform. Experimental has
-	// more lines of output than 'regular'
-	bFound := false
-	for i := 14; i < len(split); i++ {
-		if strings.Contains(split[i], expected) {
-			bFound = true
-			break
-		}
-	}
-	c.Assert(bFound, checker.Equals, true, check.Commentf("Could not find server '%s' in '%s'", expected, out))
-}
diff --git a/integration-cli/docker_cli_volume_test.go b/integration-cli/docker_cli_volume_test.go
index 3ca0834..fc930d3 100644
--- a/integration-cli/docker_cli_volume_test.go
+++ b/integration-cli/docker_cli_volume_test.go
@@ -64,9 +64,6 @@
 	})
 
 	out := result.Stdout()
-	outArr := strings.Split(strings.TrimSpace(out), "\n")
-	c.Assert(len(outArr), check.Equals, 3, check.Commentf("\n%s", out))
-
 	c.Assert(out, checker.Contains, "test1")
 	c.Assert(out, checker.Contains, "test2")
 	c.Assert(out, checker.Contains, "test3")
@@ -81,11 +78,8 @@
 	dockerCmd(c, "volume", "create", "soo")
 	dockerCmd(c, "run", "-v", "soo:"+prefix+"/foo", "busybox", "ls", "/")
 
-	out, _ := dockerCmd(c, "volume", "ls")
-	outArr := strings.Split(strings.TrimSpace(out), "\n")
-	c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out))
-
-	assertVolList(c, out, []string{"aaa", "soo", "test"})
+	out, _ := dockerCmd(c, "volume", "ls", "-q")
+	assertVolumesInList(c, out, []string{"aaa", "soo", "test"})
 }
 
 func (s *DockerSuite) TestVolumeLsFormat(c *check.C) {
@@ -94,12 +88,7 @@
 	dockerCmd(c, "volume", "create", "soo")
 
 	out, _ := dockerCmd(c, "volume", "ls", "--format", "{{.Name}}")
-	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
-
-	expected := []string{"aaa", "soo", "test"}
-	var names []string
-	names = append(names, lines...)
-	c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names))
+	assertVolumesInList(c, out, []string{"aaa", "soo", "test"})
 }
 
 func (s *DockerSuite) TestVolumeLsFormatDefaultFormat(c *check.C) {
@@ -118,12 +107,7 @@
 	c.Assert(err, checker.IsNil)
 
 	out, _ := dockerCmd(c, "--config", d, "volume", "ls")
-	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
-
-	expected := []string{"aaa default", "soo default", "test default"}
-	var names []string
-	names = append(names, lines...)
-	c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names))
+	assertVolumesInList(c, out, []string{"aaa default", "soo default", "test default"})
 }
 
 // assertVolList checks volume retrieved with ls command
@@ -142,6 +126,20 @@
 	c.Assert(volList, checker.DeepEquals, expectVols)
 }
 
+func assertVolumesInList(c *check.C, out string, expected []string) {
+	lines := strings.Split(strings.TrimSpace(string(out)), "\n")
+	for _, expect := range expected {
+		found := false
+		for _, v := range lines {
+			found = v == expect
+			if found {
+				break
+			}
+		}
+		c.Assert(found, checker.Equals, true, check.Commentf("Expected volume not found: %v, got: %v", expect, lines))
+	}
+}
+
 func (s *DockerSuite) TestVolumeCLILsFilterDangling(c *check.C) {
 	prefix, _ := getPrefixAndSlashFromDaemonPlatform()
 	dockerCmd(c, "volume", "create", "testnotinuse1")
@@ -213,10 +211,6 @@
 	dockerCmd(c, "volume", "rm", id)
 	dockerCmd(c, "volume", "rm", "test")
 
-	out, _ = dockerCmd(c, "volume", "ls")
-	outArr := strings.Split(strings.TrimSpace(out), "\n")
-	c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out))
-
 	volumeID := "testing"
 	dockerCmd(c, "run", "-v", volumeID+":"+prefix+"/foo", "--name=test", "busybox", "sh", "-c", "echo hello > /foo/bar")
 
@@ -407,10 +401,6 @@
 
 	dockerCmd(c, "volume", "rm", "-f", id)
 	dockerCmd(c, "volume", "rm", "--force", "nonexist")
-
-	out, _ = dockerCmd(c, "volume", "ls")
-	outArr := strings.Split(strings.TrimSpace(out), "\n")
-	c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out))
 }
 
 func (s *DockerSuite) TestVolumeCLIRmForce(c *check.C) {
diff --git a/integration-cli/docker_deprecated_api_v124_test.go b/integration-cli/docker_deprecated_api_v124_test.go
index edf3e57..214ae08 100644
--- a/integration-cli/docker_deprecated_api_v124_test.go
+++ b/integration-cli/docker_deprecated_api_v124_test.go
@@ -206,8 +206,10 @@
 	testRequires(c, DaemonIsLinux)
 	name := "test-host-config-links"
 	out, _ := dockerCmd(c, "run", "--name", "link0", "-d", "busybox", "top")
+	defer dockerCmd(c, "stop", "link0")
 	id := strings.TrimSpace(out)
 	dockerCmd(c, "create", "--name", name, "--link", id, "busybox", "top")
+	defer dockerCmd(c, "stop", name)
 
 	hc := inspectFieldJSON(c, name, "HostConfig")
 	config := `{"HostConfig":` + hc + `}`
diff --git a/integration-cli/docker_hub_pull_suite_test.go b/integration-cli/docker_hub_pull_suite_test.go
index 2633720..286a391 100644
--- a/integration-cli/docker_hub_pull_suite_test.go
+++ b/integration-cli/docker_hub_pull_suite_test.go
@@ -39,7 +39,7 @@
 
 // SetUpSuite starts the suite daemon.
 func (s *DockerHubPullSuite) SetUpSuite(c *check.C) {
-	testRequires(c, DaemonIsLinux)
+	testRequires(c, DaemonIsLinux, SameHostDaemon)
 	s.d = daemon.New(c, dockerBinary, dockerdBinary, daemon.Config{
 		Experimental: testEnv.ExperimentalDaemon(),
 	})
diff --git a/integration-cli/docker_utils_test.go b/integration-cli/docker_utils_test.go
index 95d2e93..1bda2c7 100644
--- a/integration-cli/docker_utils_test.go
+++ b/integration-cli/docker_utils_test.go
@@ -6,7 +6,6 @@
 	"fmt"
 	"io"
 	"io/ioutil"
-	"net/http"
 	"os"
 	"path"
 	"path/filepath"
@@ -373,8 +372,7 @@
 }
 
 func getInspectBody(c *check.C, version, id string) []byte {
-	var httpClient *http.Client
-	cli, err := client.NewClient(daemonHost(), version, httpClient, nil)
+	cli, err := request.NewEnvClientWithVersion(version)
 	c.Assert(err, check.IsNil)
 	defer cli.Close()
 	_, body, err := cli.ContainerInspectWithRaw(context.Background(), id, false)
diff --git a/integration-cli/environment/environment.go b/integration-cli/environment/environment.go
index 4e04ba7..0decc06 100644
--- a/integration-cli/environment/environment.go
+++ b/integration-cli/environment/environment.go
@@ -67,9 +67,9 @@
 // decisions on how to configure themselves according to the platform
 // of the daemon. This is initialized in docker_utils by sending
 // a version call to the daemon and examining the response header.
-// Deprecated: use Execution.DaemonInfo.OSType
+// Deprecated: use Execution.OSType
 func (e *Execution) DaemonPlatform() string {
-	return e.DaemonInfo.OSType
+	return e.OSType
 }
 
 // MinimalBaseImage is the image used for minimal builds (it depends on the platform)
diff --git a/integration-cli/events_utils_test.go b/integration-cli/events_utils_test.go
index 5801889..356b2c3 100644
--- a/integration-cli/events_utils_test.go
+++ b/integration-cli/events_utils_test.go
@@ -69,7 +69,7 @@
 // Stop stops the events command.
 func (e *eventObserver) Stop() {
 	e.command.Process.Kill()
-	e.command.Process.Release()
+	e.command.Wait()
 }
 
 // Match tries to match the events output with a given matcher.
diff --git a/integration-cli/fixtures/https/ca.pem b/integration-cli/fixtures/https/ca.pem
new file mode 120000
index 0000000..70a3e6c
--- /dev/null
+++ b/integration-cli/fixtures/https/ca.pem
@@ -0,0 +1 @@
+../../../integration/testdata/https/ca.pem
\ No newline at end of file
diff --git a/integration-cli/fixtures/https/client-cert.pem b/integration-cli/fixtures/https/client-cert.pem
new file mode 120000
index 0000000..4588820
--- /dev/null
+++ b/integration-cli/fixtures/https/client-cert.pem
@@ -0,0 +1 @@
+../../../integration/testdata/https/client-cert.pem
\ No newline at end of file
diff --git a/integration-cli/fixtures/https/client-key.pem b/integration-cli/fixtures/https/client-key.pem
new file mode 120000
index 0000000..d5f6bbe
--- /dev/null
+++ b/integration-cli/fixtures/https/client-key.pem
@@ -0,0 +1 @@
+../../../integration/testdata/https/client-key.pem
\ No newline at end of file
diff --git a/integration-cli/fixtures/https/server-cert.pem b/integration-cli/fixtures/https/server-cert.pem
new file mode 120000
index 0000000..c186010
--- /dev/null
+++ b/integration-cli/fixtures/https/server-cert.pem
@@ -0,0 +1 @@
+../../../integration/testdata/https/server-cert.pem
\ No newline at end of file
diff --git a/integration-cli/fixtures/https/server-key.pem b/integration-cli/fixtures/https/server-key.pem
new file mode 120000
index 0000000..48b9c2d
--- /dev/null
+++ b/integration-cli/fixtures/https/server-key.pem
@@ -0,0 +1 @@
+../../../integration/testdata/https/server-key.pem
\ No newline at end of file
diff --git a/integration-cli/fixtures/plugin/plugin.go b/integration-cli/fixtures/plugin/plugin.go
index c8259be..0b13134 100644
--- a/integration-cli/fixtures/plugin/plugin.go
+++ b/integration-cli/fixtures/plugin/plugin.go
@@ -1,13 +1,23 @@
 package plugin
 
 import (
+	"encoding/json"
 	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"time"
 
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/plugin"
+	"github.com/docker/docker/registry"
+	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
 
-// CreateOpt is is passed used to change the defualt plugin config before
+// CreateOpt is used to change the default plugin config before
 // creating it
 type CreateOpt func(*Config)
 
@@ -32,3 +42,142 @@
 type CreateClient interface {
 	PluginCreate(context.Context, io.Reader, types.PluginCreateOptions) error
 }
+
+// Create creates a new plugin with the specified name
+func Create(ctx context.Context, c CreateClient, name string, opts ...CreateOpt) error {
+	tmpDir, err := ioutil.TempDir("", "create-test-plugin")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(tmpDir)
+
+	tar, err := makePluginBundle(tmpDir, opts...)
+	if err != nil {
+		return err
+	}
+	defer tar.Close()
+
+	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+	defer cancel()
+
+	return c.PluginCreate(ctx, tar, types.PluginCreateOptions{RepoName: name})
+}
+
+// CreateInRegistry makes a plugin (locally) and pushes it to a registry.
+// This does not use a dockerd instance to create or push the plugin.
+// If you just want to create a plugin in some daemon, use `Create`.
+//
+// This can be useful when testing plugins on swarm where you don't really want
+// the plugin to exist on any of the daemons (immediately) and there needs to be
+// some way to distribute the plugin.
+func CreateInRegistry(ctx context.Context, repo string, auth *types.AuthConfig, opts ...CreateOpt) error {
+	tmpDir, err := ioutil.TempDir("", "create-test-plugin-local")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(tmpDir)
+
+	inPath := filepath.Join(tmpDir, "plugin")
+	if err := os.MkdirAll(inPath, 0755); err != nil {
+		return errors.Wrap(err, "error creating plugin root")
+	}
+
+	tar, err := makePluginBundle(inPath, opts...)
+	if err != nil {
+		return err
+	}
+	defer tar.Close()
+
+	dummyExec := func(m *plugin.Manager) (plugin.Executor, error) {
+		return nil, nil
+	}
+
+	regService, err := registry.NewService(registry.ServiceOptions{V2Only: true})
+	if err != nil {
+		return err
+	}
+
+	managerConfig := plugin.ManagerConfig{
+		Store:           plugin.NewStore(),
+		RegistryService: regService,
+		Root:            filepath.Join(tmpDir, "root"),
+		ExecRoot:        "/run/docker", // manager init fails if not set
+		CreateExecutor:  dummyExec,
+		LogPluginEvent:  func(id, name, action string) {}, // panics when not set
+	}
+	manager, err := plugin.NewManager(managerConfig)
+	if err != nil {
+		return errors.Wrap(err, "error creating plugin manager")
+	}
+
+	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+	defer cancel()
+	if err := manager.CreateFromContext(ctx, tar, &types.PluginCreateOptions{RepoName: repo}); err != nil {
+		return err
+	}
+
+	if auth == nil {
+		auth = &types.AuthConfig{}
+	}
+	err = manager.Push(ctx, repo, nil, auth, ioutil.Discard)
+	return errors.Wrap(err, "error pushing plugin")
+}
+
+func makePluginBundle(inPath string, opts ...CreateOpt) (io.ReadCloser, error) {
+	p := &types.PluginConfig{
+		Interface: types.PluginConfigInterface{
+			Socket: "basic.sock",
+			Types:  []types.PluginInterfaceType{{Capability: "docker.dummy/1.0"}},
+		},
+		Entrypoint: []string{"/basic"},
+	}
+	cfg := &Config{
+		PluginConfig: p,
+	}
+	for _, o := range opts {
+		o(cfg)
+	}
+	if cfg.binPath == "" {
+		binPath, err := ensureBasicPluginBin()
+		if err != nil {
+			return nil, err
+		}
+		cfg.binPath = binPath
+	}
+
+	configJSON, err := json.Marshal(p)
+	if err != nil {
+		return nil, err
+	}
+	if err := ioutil.WriteFile(filepath.Join(inPath, "config.json"), configJSON, 0644); err != nil {
+		return nil, err
+	}
+	if err := os.MkdirAll(filepath.Join(inPath, "rootfs", filepath.Dir(p.Entrypoint[0])), 0755); err != nil {
+		return nil, errors.Wrap(err, "error creating plugin rootfs dir")
+	}
+	if err := archive.NewDefaultArchiver().CopyFileWithTar(cfg.binPath, filepath.Join(inPath, "rootfs", p.Entrypoint[0])); err != nil {
+		return nil, errors.Wrap(err, "error copying plugin binary to rootfs path")
+	}
+	tar, err := archive.Tar(inPath, archive.Uncompressed)
+	return tar, errors.Wrap(err, "error making plugin archive")
+}
+
+func ensureBasicPluginBin() (string, error) {
+	name := "docker-basic-plugin"
+	p, err := exec.LookPath(name)
+	if err == nil {
+		return p, nil
+	}
+
+	goBin, err := exec.LookPath("go")
+	if err != nil {
+		return "", err
+	}
+	installPath := filepath.Join(os.Getenv("GOPATH"), "bin", name)
+	cmd := exec.Command(goBin, "build", "-o", installPath, "./"+filepath.Join("fixtures", "plugin", "basic"))
+	cmd.Env = append(cmd.Env, "CGO_ENABLED=0")
+	if out, err := cmd.CombinedOutput(); err != nil {
+		return "", errors.Wrapf(err, "error building basic plugin bin: %s", string(out))
+	}
+	return installPath, nil
+}
diff --git a/integration-cli/fixtures/plugin/plugin_linux.go b/integration-cli/fixtures/plugin/plugin_linux.go
deleted file mode 100644
index 757694c..0000000
--- a/integration-cli/fixtures/plugin/plugin_linux.go
+++ /dev/null
@@ -1,157 +0,0 @@
-package plugin
-
-import (
-	"encoding/json"
-	"io"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"time"
-
-	"github.com/docker/docker/api/types"
-	"github.com/docker/docker/libcontainerd"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/plugin"
-	"github.com/docker/docker/registry"
-	"github.com/pkg/errors"
-	"golang.org/x/net/context"
-)
-
-// Create creates a new plugin with the specified name
-func Create(ctx context.Context, c CreateClient, name string, opts ...CreateOpt) error {
-	tmpDir, err := ioutil.TempDir("", "create-test-plugin")
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(tmpDir)
-
-	tar, err := makePluginBundle(tmpDir, opts...)
-	if err != nil {
-		return err
-	}
-	defer tar.Close()
-
-	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
-	defer cancel()
-
-	return c.PluginCreate(ctx, tar, types.PluginCreateOptions{RepoName: name})
-}
-
-// TODO(@cpuguy83): we really shouldn't have to do this...
-// The manager panics on init when `Executor` is not set.
-type dummyExecutor struct{}
-
-func (dummyExecutor) Client(libcontainerd.Backend) (libcontainerd.Client, error) { return nil, nil }
-func (dummyExecutor) Cleanup()                                                   {}
-func (dummyExecutor) UpdateOptions(...libcontainerd.RemoteOption) error          { return nil }
-
-// CreateInRegistry makes a plugin (locally) and pushes it to a registry.
-// This does not use a dockerd instance to create or push the plugin.
-// If you just want to create a plugin in some daemon, use `Create`.
-//
-// This can be useful when testing plugins on swarm where you don't really want
-// the plugin to exist on any of the daemons (immediately) and there needs to be
-// some way to distribute the plugin.
-func CreateInRegistry(ctx context.Context, repo string, auth *types.AuthConfig, opts ...CreateOpt) error {
-	tmpDir, err := ioutil.TempDir("", "create-test-plugin-local")
-	if err != nil {
-		return err
-	}
-	defer os.RemoveAll(tmpDir)
-
-	inPath := filepath.Join(tmpDir, "plugin")
-	if err := os.MkdirAll(inPath, 0755); err != nil {
-		return errors.Wrap(err, "error creating plugin root")
-	}
-
-	tar, err := makePluginBundle(inPath, opts...)
-	if err != nil {
-		return err
-	}
-	defer tar.Close()
-
-	managerConfig := plugin.ManagerConfig{
-		Store:           plugin.NewStore(),
-		RegistryService: registry.NewService(registry.ServiceOptions{V2Only: true}),
-		Root:            filepath.Join(tmpDir, "root"),
-		ExecRoot:        "/run/docker", // manager init fails if not set
-		Executor:        dummyExecutor{},
-		LogPluginEvent:  func(id, name, action string) {}, // panics when not set
-	}
-	manager, err := plugin.NewManager(managerConfig)
-	if err != nil {
-		return errors.Wrap(err, "error creating plugin manager")
-	}
-
-	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
-	defer cancel()
-	if err := manager.CreateFromContext(ctx, tar, &types.PluginCreateOptions{RepoName: repo}); err != nil {
-		return err
-	}
-
-	if auth == nil {
-		auth = &types.AuthConfig{}
-	}
-	err = manager.Push(ctx, repo, nil, auth, ioutil.Discard)
-	return errors.Wrap(err, "error pushing plugin")
-}
-
-func makePluginBundle(inPath string, opts ...CreateOpt) (io.ReadCloser, error) {
-	p := &types.PluginConfig{
-		Interface: types.PluginConfigInterface{
-			Socket: "basic.sock",
-			Types:  []types.PluginInterfaceType{{Capability: "docker.dummy/1.0"}},
-		},
-		Entrypoint: []string{"/basic"},
-	}
-	cfg := &Config{
-		PluginConfig: p,
-	}
-	for _, o := range opts {
-		o(cfg)
-	}
-	if cfg.binPath == "" {
-		binPath, err := ensureBasicPluginBin()
-		if err != nil {
-			return nil, err
-		}
-		cfg.binPath = binPath
-	}
-
-	configJSON, err := json.Marshal(p)
-	if err != nil {
-		return nil, err
-	}
-	if err := ioutil.WriteFile(filepath.Join(inPath, "config.json"), configJSON, 0644); err != nil {
-		return nil, err
-	}
-	if err := os.MkdirAll(filepath.Join(inPath, "rootfs", filepath.Dir(p.Entrypoint[0])), 0755); err != nil {
-		return nil, errors.Wrap(err, "error creating plugin rootfs dir")
-	}
-	if err := archive.NewDefaultArchiver().CopyFileWithTar(cfg.binPath, filepath.Join(inPath, "rootfs", p.Entrypoint[0])); err != nil {
-		return nil, errors.Wrap(err, "error copying plugin binary to rootfs path")
-	}
-	tar, err := archive.Tar(inPath, archive.Uncompressed)
-	return tar, errors.Wrap(err, "error making plugin archive")
-}
-
-func ensureBasicPluginBin() (string, error) {
-	name := "docker-basic-plugin"
-	p, err := exec.LookPath(name)
-	if err == nil {
-		return p, nil
-	}
-
-	goBin, err := exec.LookPath("go")
-	if err != nil {
-		return "", err
-	}
-	installPath := filepath.Join(os.Getenv("GOPATH"), "bin", name)
-	cmd := exec.Command(goBin, "build", "-o", installPath, "./"+filepath.Join("fixtures", "plugin", "basic"))
-	cmd.Env = append(cmd.Env, "CGO_ENABLED=0")
-	if out, err := cmd.CombinedOutput(); err != nil {
-		return "", errors.Wrapf(err, "error building basic plugin bin: %s", string(out))
-	}
-	return installPath, nil
-}
diff --git a/integration-cli/fixtures/plugin/plugin_unsuported.go b/integration-cli/fixtures/plugin/plugin_unsuported.go
deleted file mode 100644
index 7c272a3..0000000
--- a/integration-cli/fixtures/plugin/plugin_unsuported.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build !linux
-
-package plugin
-
-import (
-	"github.com/docker/docker/api/types"
-	"github.com/pkg/errors"
-	"golang.org/x/net/context"
-)
-
-// Create is not supported on this platform
-func Create(ctx context.Context, c CreateClient, name string, opts ...CreateOpt) error {
-	return errors.New("not supported on this platform")
-}
-
-// CreateInRegistry is not supported on this platform
-func CreateInRegistry(ctx context.Context, repo string, auth *types.AuthConfig, opts ...CreateOpt) error {
-	return errors.New("not supported on this platform")
-}
diff --git a/integration-cli/fixtures_linux_daemon_test.go b/integration-cli/fixtures_linux_daemon_test.go
index 1508762..6ac4511 100644
--- a/integration-cli/fixtures_linux_daemon_test.go
+++ b/integration-cli/fixtures_linux_daemon_test.go
@@ -57,7 +57,7 @@
 	}
 
 	if runtime.GOOS == "linux" && runtime.GOARCH == "amd64" {
-		out, err := exec.Command(gcc, "-s", "-m32", "-nostdlib", "../contrib/syscall-test/exit32.s", "-o", tmp+"/"+"exit32-test").CombinedOutput()
+		out, err := exec.Command(gcc, "-s", "-m32", "-nostdlib", "-static", "../contrib/syscall-test/exit32.s", "-o", tmp+"/"+"exit32-test").CombinedOutput()
 		c.Assert(err, checker.IsNil, check.Commentf(string(out)))
 	}
 
diff --git a/integration-cli/request/request.go b/integration-cli/request/request.go
index 72632f3..f22b08d 100644
--- a/integration-cli/request/request.go
+++ b/integration-cli/request/request.go
@@ -129,7 +129,11 @@
 		return nil, fmt.Errorf("could not create new request: %v", err)
 	}
 
-	req.URL.Scheme = "http"
+	if os.Getenv("DOCKER_TLS_VERIFY") != "" {
+		req.URL.Scheme = "https"
+	} else {
+		req.URL.Scheme = "http"
+	}
 	req.URL.Host = addr
 
 	for _, config := range modifiers {
@@ -319,3 +323,35 @@
 	}
 	return daemonURLStr
 }
+
+// NewEnvClientWithVersion returns a docker client with a specified version.
+// See: github.com/docker/docker/client `NewEnvClient()`
+func NewEnvClientWithVersion(version string) (*dclient.Client, error) {
+	if version == "" {
+		return nil, errors.New("version not specified")
+	}
+
+	var httpClient *http.Client
+	if os.Getenv("DOCKER_CERT_PATH") != "" {
+		tlsConfig, err := getTLSConfig()
+		if err != nil {
+			return nil, err
+		}
+		httpClient = &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: tlsConfig,
+			},
+		}
+	}
+
+	host := os.Getenv("DOCKER_HOST")
+	if host == "" {
+		host = dclient.DefaultDockerHost
+	}
+
+	cli, err := dclient.NewClient(host, version, httpClient, nil)
+	if err != nil {
+		return cli, err
+	}
+	return cli, nil
+}
diff --git a/integration-cli/requirements_test.go b/integration-cli/requirements_test.go
index 0b10969..838977e 100644
--- a/integration-cli/requirements_test.go
+++ b/integration-cli/requirements_test.go
@@ -1,6 +1,7 @@
 package main
 
 import (
+	"context"
 	"fmt"
 	"io/ioutil"
 	"net/http"
@@ -10,6 +11,8 @@
 	"strings"
 	"time"
 
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/requirement"
 )
 
@@ -18,12 +21,12 @@
 }
 
 func DaemonIsWindows() bool {
-	return testEnv.DaemonInfo.OSType == "windows"
+	return testEnv.OSType == "windows"
 }
 
 func DaemonIsWindowsAtLeastBuild(buildNumber int) func() bool {
 	return func() bool {
-		if testEnv.DaemonInfo.OSType != "windows" {
+		if testEnv.OSType != "windows" {
 			return false
 		}
 		version := testEnv.DaemonInfo.KernelVersion
@@ -33,7 +36,19 @@
 }
 
 func DaemonIsLinux() bool {
-	return testEnv.DaemonInfo.OSType == "linux"
+	return testEnv.OSType == "linux"
+}
+
+func OnlyDefaultNetworks() bool {
+	cli, err := client.NewEnvClient()
+	if err != nil {
+		return false
+	}
+	networks, err := cli.NetworkList(context.TODO(), types.NetworkListOptions{})
+	if err != nil || len(networks) > 0 {
+		return false
+	}
+	return true
 }
 
 // Deprecated: use skip.IfCondition(t, !testEnv.DaemonInfo.ExperimentalBuild)
@@ -41,10 +56,6 @@
 	return testEnv.DaemonInfo.ExperimentalBuild
 }
 
-func NotExperimentalDaemon() bool {
-	return !testEnv.DaemonInfo.ExperimentalBuild
-}
-
 func IsAmd64() bool {
 	return os.Getenv("DOCKER_ENGINE_GOARCH") == "amd64"
 }
@@ -163,21 +174,21 @@
 }
 
 func IsPausable() bool {
-	if testEnv.DaemonInfo.OSType == "windows" {
+	if testEnv.OSType == "windows" {
 		return testEnv.DaemonInfo.Isolation == "hyperv"
 	}
 	return true
 }
 
 func NotPausable() bool {
-	if testEnv.DaemonInfo.OSType == "windows" {
+	if testEnv.OSType == "windows" {
 		return testEnv.DaemonInfo.Isolation == "process"
 	}
 	return false
 }
 
 func IsolationIs(expectedIsolation string) bool {
-	return testEnv.DaemonInfo.OSType == "windows" && string(testEnv.DaemonInfo.Isolation) == expectedIsolation
+	return testEnv.OSType == "windows" && string(testEnv.DaemonInfo.Isolation) == expectedIsolation
 }
 
 func IsolationIsHyperv() bool {
diff --git a/integration-cli/requirements_unix_test.go b/integration-cli/requirements_unix_test.go
index 6ef900f..7c594f7 100644
--- a/integration-cli/requirements_unix_test.go
+++ b/integration-cli/requirements_unix_test.go
@@ -18,19 +18,19 @@
 )
 
 func cpuCfsPeriod() bool {
-	return SysInfo.CPUCfsPeriod
+	return testEnv.DaemonInfo.CPUCfsPeriod
 }
 
 func cpuCfsQuota() bool {
-	return SysInfo.CPUCfsQuota
+	return testEnv.DaemonInfo.CPUCfsQuota
 }
 
 func cpuShare() bool {
-	return SysInfo.CPUShares
+	return testEnv.DaemonInfo.CPUShares
 }
 
 func oomControl() bool {
-	return SysInfo.OomKillDisable
+	return testEnv.DaemonInfo.OomKillDisable
 }
 
 func pidsLimit() bool {
@@ -38,11 +38,11 @@
 }
 
 func kernelMemorySupport() bool {
-	return SysInfo.KernelMemory
+	return testEnv.DaemonInfo.KernelMemory
 }
 
 func memoryLimitSupport() bool {
-	return SysInfo.MemoryLimit
+	return testEnv.DaemonInfo.MemoryLimit
 }
 
 func memoryReservationSupport() bool {
@@ -50,19 +50,19 @@
 }
 
 func swapMemorySupport() bool {
-	return SysInfo.SwapLimit
+	return testEnv.DaemonInfo.SwapLimit
 }
 
 func memorySwappinessSupport() bool {
-	return SysInfo.MemorySwappiness
+	return SameHostDaemon() && SysInfo.MemorySwappiness
 }
 
 func blkioWeight() bool {
-	return SysInfo.BlkioWeight
+	return SameHostDaemon() && SysInfo.BlkioWeight
 }
 
 func cgroupCpuset() bool {
-	return SysInfo.Cpuset
+	return testEnv.DaemonInfo.CPUSet
 }
 
 func seccompEnabled() bool {
@@ -111,5 +111,7 @@
 }
 
 func init() {
-	SysInfo = sysinfo.New(true)
+	if SameHostDaemon() {
+		SysInfo = sysinfo.New(true)
+	}
 }
diff --git a/integration-cli/utils_test.go b/integration-cli/utils_test.go
index e09fb80..1146e1b 100644
--- a/integration-cli/utils_test.go
+++ b/integration-cli/utils_test.go
@@ -7,7 +7,8 @@
 	"path/filepath"
 	"strings"
 
-	"github.com/docker/docker/pkg/stringutils"
+	"github.com/docker/docker/internal/testutil"
+	"github.com/go-check/check"
 	"github.com/gotestyourself/gotestyourself/icmd"
 	"github.com/pkg/errors"
 )
@@ -59,7 +60,7 @@
 	if platform == "windows" {
 		tmp = os.Getenv("TEMP")
 	}
-	path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10)))
+	path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, testutil.GenerateRandomAlphaOnlyString(10)))
 	if platform == "windows" {
 		return filepath.FromSlash(path) // Using \
 	}
@@ -112,3 +113,71 @@
 	out, err := cmds[len(cmds)-1].CombinedOutput()
 	return string(out), err
 }
+
+type elementListOptions struct {
+	element, format string
+}
+
+func existingElements(c *check.C, opts elementListOptions) []string {
+	args := []string{}
+	switch opts.element {
+	case "container":
+		args = append(args, "ps", "-a")
+	case "image":
+		args = append(args, "images", "-a")
+	case "network":
+		args = append(args, "network", "ls")
+	case "plugin":
+		args = append(args, "plugin", "ls")
+	case "volume":
+		args = append(args, "volume", "ls")
+	}
+	if opts.format != "" {
+		args = append(args, "--format", opts.format)
+	}
+	out, _ := dockerCmd(c, args...)
+	lines := []string{}
+	for _, l := range strings.Split(out, "\n") {
+		if l != "" {
+			lines = append(lines, l)
+		}
+	}
+	return lines
+}
+
+// ExistingContainerIDs returns a list of currently existing container IDs.
+func ExistingContainerIDs(c *check.C) []string {
+	return existingElements(c, elementListOptions{element: "container", format: "{{.ID}}"})
+}
+
+// ExistingContainerNames returns a list of existing container names.
+func ExistingContainerNames(c *check.C) []string {
+	return existingElements(c, elementListOptions{element: "container", format: "{{.Names}}"})
+}
+
+// RemoveLinesForExistingElements removes existing elements from the output of a
+// docker command.
+// This function takes an output []string and returns a []string.
+func RemoveLinesForExistingElements(output, existing []string) []string {
+	for _, e := range existing {
+		index := -1
+		for i, line := range output {
+			if strings.Contains(line, e) {
+				index = i
+				break
+			}
+		}
+		if index != -1 {
+			output = append(output[:index], output[index+1:]...)
+		}
+	}
+	return output
+}
+
+// RemoveOutputForExistingElements removes existing elements from the output of
+// a docker command.
+// This function takes an output string and returns a string.
+func RemoveOutputForExistingElements(output string, existing []string) string {
+	res := RemoveLinesForExistingElements(strings.Split(output, "\n"), existing)
+	return strings.Join(res, "\n")
+}
diff --git a/integration/build/build_test.go b/integration/build/build_test.go
new file mode 100644
index 0000000..b04cbbf
--- /dev/null
+++ b/integration/build/build_test.go
@@ -0,0 +1,131 @@
+package build
+
+import (
+	"archive/tar"
+	"bytes"
+	"context"
+	"encoding/json"
+	"io"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/integration/util/request"
+	"github.com/docker/docker/pkg/jsonmessage"
+	"github.com/stretchr/testify/require"
+)
+
+func TestBuildWithRemoveAndForceRemove(t *testing.T) {
+	defer setupTest(t)()
+	t.Parallel()
+	cases := []struct {
+		name                           string
+		dockerfile                     string
+		numberOfIntermediateContainers int
+		rm                             bool
+		forceRm                        bool
+	}{
+		{
+			name: "successful build with no removal",
+			dockerfile: `FROM busybox
+			RUN exit 0
+			RUN exit 0`,
+			numberOfIntermediateContainers: 2,
+			rm:      false,
+			forceRm: false,
+		},
+		{
+			name: "successful build with remove",
+			dockerfile: `FROM busybox
+			RUN exit 0
+			RUN exit 0`,
+			numberOfIntermediateContainers: 0,
+			rm:      true,
+			forceRm: false,
+		},
+		{
+			name: "successful build with remove and force remove",
+			dockerfile: `FROM busybox
+			RUN exit 0
+			RUN exit 0`,
+			numberOfIntermediateContainers: 0,
+			rm:      true,
+			forceRm: true,
+		},
+		{
+			name: "failed build with no removal",
+			dockerfile: `FROM busybox
+			RUN exit 0
+			RUN exit 1`,
+			numberOfIntermediateContainers: 2,
+			rm:      false,
+			forceRm: false,
+		},
+		{
+			name: "failed build with remove",
+			dockerfile: `FROM busybox
+			RUN exit 0
+			RUN exit 1`,
+			numberOfIntermediateContainers: 1,
+			rm:      true,
+			forceRm: false,
+		},
+		{
+			name: "failed build with remove and force remove",
+			dockerfile: `FROM busybox
+			RUN exit 0
+			RUN exit 1`,
+			numberOfIntermediateContainers: 0,
+			rm:      true,
+			forceRm: true,
+		},
+	}
+
+	client := request.NewAPIClient(t)
+	ctx := context.Background()
+	for _, c := range cases {
+		t.Run(c.name, func(t *testing.T) {
+			t.Parallel()
+			dockerfile := []byte(c.dockerfile)
+
+			buff := bytes.NewBuffer(nil)
+			tw := tar.NewWriter(buff)
+			require.NoError(t, tw.WriteHeader(&tar.Header{
+				Name: "Dockerfile",
+				Size: int64(len(dockerfile)),
+			}))
+			_, err := tw.Write(dockerfile)
+			require.NoError(t, err)
+			require.NoError(t, tw.Close())
+			resp, err := client.ImageBuild(ctx, buff, types.ImageBuildOptions{Remove: c.rm, ForceRemove: c.forceRm, NoCache: true})
+			require.NoError(t, err)
+			defer resp.Body.Close()
+			filter, err := buildContainerIdsFilter(resp.Body)
+			require.NoError(t, err)
+			remainingContainers, err := client.ContainerList(ctx, types.ContainerListOptions{Filters: filter, All: true})
+			require.NoError(t, err)
+			require.Equal(t, c.numberOfIntermediateContainers, len(remainingContainers), "Expected %v remaining intermediate containers, got %v", c.numberOfIntermediateContainers, len(remainingContainers))
+		})
+	}
+}
+
+func buildContainerIdsFilter(buildOutput io.Reader) (filters.Args, error) {
+	const intermediateContainerPrefix = " ---> Running in "
+	filter := filters.NewArgs()
+
+	dec := json.NewDecoder(buildOutput)
+	for {
+		m := jsonmessage.JSONMessage{}
+		err := dec.Decode(&m)
+		if err == io.EOF {
+			return filter, nil
+		}
+		if err != nil {
+			return filter, err
+		}
+		if ix := strings.Index(m.Stream, intermediateContainerPrefix); ix != -1 {
+			filter.Add("id", strings.TrimSpace(m.Stream[ix+len(intermediateContainerPrefix):]))
+		}
+	}
+}
diff --git a/integration/build/main_test.go b/integration/build/main_test.go
new file mode 100644
index 0000000..113766a
--- /dev/null
+++ b/integration/build/main_test.go
@@ -0,0 +1,33 @@
+package build
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/docker/docker/internal/test/environment"
+)
+
+var testEnv *environment.Execution
+
+func TestMain(m *testing.M) {
+	var err error
+	testEnv, err = environment.New()
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	err = environment.EnsureFrozenImagesLinux(testEnv)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+	testEnv.Print()
+	os.Exit(m.Run())
+}
+
+func setupTest(t *testing.T) func() {
+	environment.ProtectAll(t, testEnv)
+	return func() { testEnv.Clean(t) }
+}
diff --git a/integration/container/main_test.go b/integration/container/main_test.go
index 1c4e078..9854004 100644
--- a/integration/container/main_test.go
+++ b/integration/container/main_test.go
@@ -17,12 +17,17 @@
 		fmt.Println(err)
 		os.Exit(1)
 	}
+	err = environment.EnsureFrozenImagesLinux(testEnv)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
 
 	testEnv.Print()
 	os.Exit(m.Run())
 }
 
 func setupTest(t *testing.T) func() {
-	environment.ProtectImages(t, testEnv)
+	environment.ProtectAll(t, testEnv)
 	return func() { testEnv.Clean(t) }
 }
diff --git a/integration/container/rename_test.go b/integration/container/rename_test.go
new file mode 100644
index 0000000..cf36757
--- /dev/null
+++ b/integration/container/rename_test.go
@@ -0,0 +1,88 @@
+package container
+
+import (
+	"context"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/api/types/strslice"
+	"github.com/docker/docker/client"
+	"github.com/docker/docker/integration/util/request"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func runContainer(ctx context.Context, t *testing.T, client client.APIClient, cntCfg *container.Config, hstCfg *container.HostConfig, nwkCfg *network.NetworkingConfig, cntName string) string {
+	cnt, err := client.ContainerCreate(ctx, cntCfg, hstCfg, nwkCfg, cntName)
+	require.NoError(t, err)
+
+	err = client.ContainerStart(ctx, cnt.ID, types.ContainerStartOptions{})
+	require.NoError(t, err)
+	return cnt.ID
+}
+
+// This test simulates the scenario mentioned in #31392:
+// Having two linked containers, renaming the target and bringing in a replacement
+// and then deleting and recreating the source container linked to the new target.
+// This checks that "rename" updates source container correctly and doesn't set it to null.
+func TestRenameLinkedContainer(t *testing.T) {
+	defer setupTest(t)()
+	ctx := context.Background()
+	client := request.NewAPIClient(t)
+
+	cntConfig := &container.Config{
+		Image: "busybox",
+		Tty:   true,
+		Cmd:   strslice.StrSlice([]string{"top"}),
+	}
+
+	var (
+		aID, bID string
+		cntJSON  types.ContainerJSON
+		err      error
+	)
+
+	aID = runContainer(ctx, t, client,
+		cntConfig,
+		&container.HostConfig{},
+		&network.NetworkingConfig{},
+		"a0",
+	)
+
+	bID = runContainer(ctx, t, client,
+		cntConfig,
+		&container.HostConfig{
+			Links: []string{"a0"},
+		},
+		&network.NetworkingConfig{},
+		"b0",
+	)
+
+	err = client.ContainerRename(ctx, aID, "a1")
+	require.NoError(t, err)
+
+	runContainer(ctx, t, client,
+		cntConfig,
+		&container.HostConfig{},
+		&network.NetworkingConfig{},
+		"a0",
+	)
+
+	err = client.ContainerRemove(ctx, bID, types.ContainerRemoveOptions{Force: true})
+	require.NoError(t, err)
+
+	bID = runContainer(ctx, t, client,
+		cntConfig,
+		&container.HostConfig{
+			Links: []string{"a0"},
+		},
+		&network.NetworkingConfig{},
+		"b0",
+	)
+
+	cntJSON, err = client.ContainerInspect(ctx, bID)
+	require.NoError(t, err)
+	assert.Equal(t, []string{"/a0:/b0/a0"}, cntJSON.HostConfig.Links)
+}
diff --git a/integration/image/import_test.go b/integration/image/import_test.go
new file mode 100644
index 0000000..955891f
--- /dev/null
+++ b/integration/image/import_test.go
@@ -0,0 +1,36 @@
+package image
+
+import (
+	"archive/tar"
+	"bytes"
+	"context"
+	"io"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/integration/util/request"
+	"github.com/docker/docker/internal/testutil"
+)
+
+// Ensure we don't regress on CVE-2017-14992.
+func TestImportExtremelyLargeImageWorks(t *testing.T) {
+	client := request.NewAPIClient(t)
+
+	// Construct an empty tar archive with about 8GB of junk padding at the
+	// end. This should not cause any crashes (the padding should be mostly
+	// ignored).
+	var tarBuffer bytes.Buffer
+	tw := tar.NewWriter(&tarBuffer)
+	if err := tw.Close(); err != nil {
+		t.Fatal(err)
+	}
+	imageRdr := io.MultiReader(&tarBuffer, io.LimitReader(testutil.DevZero, 8*1024*1024*1024))
+
+	_, err := client.ImageImport(context.Background(),
+		types.ImageImportSource{Source: imageRdr, SourceName: "-"},
+		"test1234:v42",
+		types.ImageImportOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/integration/network/delete_test.go b/integration/network/delete_test.go
new file mode 100644
index 0000000..0a3b219
--- /dev/null
+++ b/integration/network/delete_test.go
@@ -0,0 +1,72 @@
+package network
+
+import (
+	"context"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/integration/util/request"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func containsNetwork(nws []types.NetworkResource, nw types.NetworkCreateResponse) bool {
+	for _, n := range nws {
+		if n.ID == nw.ID {
+			return true
+		}
+	}
+	return false
+}
+
+// createAmbiguousNetworks creates three networks, of which the second network
+// uses a prefix of the first network's ID as name. The third network uses the
+// first network's ID as name.
+//
+// After successful creation, properties of all three networks are returned
+func createAmbiguousNetworks(t *testing.T) (types.NetworkCreateResponse, types.NetworkCreateResponse, types.NetworkCreateResponse) {
+	client := request.NewAPIClient(t)
+	ctx := context.Background()
+
+	testNet, err := client.NetworkCreate(ctx, "testNet", types.NetworkCreate{})
+	require.NoError(t, err)
+	idPrefixNet, err := client.NetworkCreate(ctx, testNet.ID[:12], types.NetworkCreate{})
+	require.NoError(t, err)
+	fullIDNet, err := client.NetworkCreate(ctx, testNet.ID, types.NetworkCreate{})
+	require.NoError(t, err)
+
+	nws, err := client.NetworkList(ctx, types.NetworkListOptions{})
+	require.NoError(t, err)
+
+	assert.Equal(t, true, containsNetwork(nws, testNet), "failed to create network testNet")
+	assert.Equal(t, true, containsNetwork(nws, idPrefixNet), "failed to create network idPrefixNet")
+	assert.Equal(t, true, containsNetwork(nws, fullIDNet), "failed to create network fullIDNet")
+	return testNet, idPrefixNet, fullIDNet
+}
+
+// TestDockerNetworkDeletePreferID tests that if a network with a name
+// equal to another network's ID exists, the Network with the given
+// ID is removed, and not the network with the given name.
+func TestDockerNetworkDeletePreferID(t *testing.T) {
+	defer setupTest(t)()
+	client := request.NewAPIClient(t)
+	ctx := context.Background()
+	testNet, idPrefixNet, fullIDNet := createAmbiguousNetworks(t)
+
+	// Delete the network using a prefix of the first network's ID as name.
+	// This should remove the network with the id-prefix, not the original network.
+	err := client.NetworkRemove(ctx, testNet.ID[:12])
+	require.NoError(t, err)
+
+	// Delete the network using networkID. This should remove the original
+	// network, not the network with the name equal to the networkID
+	err = client.NetworkRemove(ctx, testNet.ID)
+	require.NoError(t, err)
+
+	// networks "testNet" and "idPrefixNet" should be removed, but "fullIDNet" should still exist
+	nws, err := client.NetworkList(ctx, types.NetworkListOptions{})
+	require.NoError(t, err)
+	assert.Equal(t, false, containsNetwork(nws, testNet), "Network testNet not removed")
+	assert.Equal(t, false, containsNetwork(nws, idPrefixNet), "Network idPrefixNet not removed")
+	assert.Equal(t, true, containsNetwork(nws, fullIDNet), "Network fullIDNet not found")
+}
diff --git a/integration/network/main_test.go b/integration/network/main_test.go
new file mode 100644
index 0000000..9729c3e
--- /dev/null
+++ b/integration/network/main_test.go
@@ -0,0 +1,33 @@
+package network
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/docker/docker/internal/test/environment"
+)
+
+var testEnv *environment.Execution
+
+func TestMain(m *testing.M) {
+	var err error
+	testEnv, err = environment.New()
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	err = environment.EnsureFrozenImagesLinux(testEnv)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+	testEnv.Print()
+	os.Exit(m.Run())
+}
+
+func setupTest(t *testing.T) func() {
+	environment.ProtectAll(t, testEnv)
+	return func() { testEnv.Clean(t) }
+}
diff --git a/integration/plugin/authz/authz_plugin_test.go b/integration/plugin/authz/authz_plugin_test.go
new file mode 100644
index 0000000..0f46d8f
--- /dev/null
+++ b/integration/plugin/authz/authz_plugin_test.go
@@ -0,0 +1,467 @@
+// +build !windows
+
+package authz
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	eventtypes "github.com/docker/docker/api/types/events"
+	networktypes "github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/client"
+	"github.com/docker/docker/integration/util/request"
+	"github.com/docker/docker/internal/test/environment"
+	"github.com/docker/docker/pkg/authorization"
+	"github.com/gotestyourself/gotestyourself/skip"
+	"github.com/stretchr/testify/require"
+)
+
+const (
+	testAuthZPlugin     = "authzplugin"
+	unauthorizedMessage = "User unauthorized authz plugin"
+	errorMessage        = "something went wrong..."
+	serverVersionAPI    = "/version"
+)
+
+var (
+	alwaysAllowed = []string{"/_ping", "/info"}
+	ctrl          *authorizationController
+)
+
+type authorizationController struct {
+	reqRes          authorization.Response // reqRes holds the plugin response to the initial client request
+	resRes          authorization.Response // resRes holds the plugin response to the daemon response
+	versionReqCount int                    // versionReqCount counts the number of requests to the server version API endpoint
+	versionResCount int                    // versionResCount counts the number of responses from the server version API endpoint
+	requestsURIs    []string               // requestsURIs stores all request URIs that are sent to the authorization controller
+	reqUser         string
+	resUser         string
+}
+
+func setupTestV1(t *testing.T) func() {
+	ctrl = &authorizationController{}
+	teardown := setupTest(t)
+
+	err := os.MkdirAll("/etc/docker/plugins", 0755)
+	require.Nil(t, err)
+
+	fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", testAuthZPlugin)
+	err = ioutil.WriteFile(fileName, []byte(server.URL), 0644)
+	require.Nil(t, err)
+
+	return func() {
+		err := os.RemoveAll("/etc/docker/plugins")
+		require.Nil(t, err)
+
+		teardown()
+		ctrl = nil
+	}
+}
+
+// check for always allowed endpoints to not inhibit test framework functions
+func isAllowed(reqURI string) bool {
+	for _, endpoint := range alwaysAllowed {
+		if strings.HasSuffix(reqURI, endpoint) {
+			return true
+		}
+	}
+	return false
+}
+
+func TestAuthZPluginAllowRequest(t *testing.T) {
+	defer setupTestV1(t)()
+	ctrl.reqRes.Allow = true
+	ctrl.resRes.Allow = true
+	d.StartWithBusybox(t, "--authorization-plugin="+testAuthZPlugin)
+
+	client, err := d.NewClient()
+	require.Nil(t, err)
+
+	// Ensure command successful
+	createResponse, err := client.ContainerCreate(context.Background(), &container.Config{Cmd: []string{"top"}, Image: "busybox"}, &container.HostConfig{}, &networktypes.NetworkingConfig{}, "")
+	require.Nil(t, err)
+
+	err = client.ContainerStart(context.Background(), createResponse.ID, types.ContainerStartOptions{})
+	require.Nil(t, err)
+
+	assertURIRecorded(t, ctrl.requestsURIs, "/containers/create")
+	assertURIRecorded(t, ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", createResponse.ID))
+
+	_, err = client.ServerVersion(context.Background())
+	require.Nil(t, err)
+	require.Equal(t, 1, ctrl.versionReqCount)
+	require.Equal(t, 1, ctrl.versionResCount)
+}
+
+func TestAuthZPluginTLS(t *testing.T) {
+	defer setupTestV1(t)()
+	const (
+		testDaemonHTTPSAddr = "tcp://localhost:4271"
+		cacertPath          = "../../testdata/https/ca.pem"
+		serverCertPath      = "../../testdata/https/server-cert.pem"
+		serverKeyPath       = "../../testdata/https/server-key.pem"
+		clientCertPath      = "../../testdata/https/client-cert.pem"
+		clientKeyPath       = "../../testdata/https/client-key.pem"
+	)
+
+	d.Start(t,
+		"--authorization-plugin="+testAuthZPlugin,
+		"--tlsverify",
+		"--tlscacert", cacertPath,
+		"--tlscert", serverCertPath,
+		"--tlskey", serverKeyPath,
+		"-H", testDaemonHTTPSAddr)
+
+	ctrl.reqRes.Allow = true
+	ctrl.resRes.Allow = true
+
+	client, err := request.NewTLSAPIClient(t, testDaemonHTTPSAddr, cacertPath, clientCertPath, clientKeyPath)
+	require.Nil(t, err)
+
+	_, err = client.ServerVersion(context.Background())
+	require.Nil(t, err)
+
+	require.Equal(t, "client", ctrl.reqUser)
+	require.Equal(t, "client", ctrl.resUser)
+}
+
+func TestAuthZPluginDenyRequest(t *testing.T) {
+	defer setupTestV1(t)()
+	d.Start(t, "--authorization-plugin="+testAuthZPlugin)
+	ctrl.reqRes.Allow = false
+	ctrl.reqRes.Msg = unauthorizedMessage
+
+	client, err := d.NewClient()
+	require.Nil(t, err)
+
+	// Ensure command is blocked
+	_, err = client.ServerVersion(context.Background())
+	require.NotNil(t, err)
+	require.Equal(t, 1, ctrl.versionReqCount)
+	require.Equal(t, 0, ctrl.versionResCount)
+
+	// Ensure unauthorized message appears in response
+	require.Equal(t, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s", testAuthZPlugin, unauthorizedMessage), err.Error())
+}
+
+// TestAuthZPluginAPIDenyResponse validates that when the authorization
+// plugin denies the request, the status code is forbidden
+func TestAuthZPluginAPIDenyResponse(t *testing.T) {
+	defer setupTestV1(t)()
+	d.Start(t, "--authorization-plugin="+testAuthZPlugin)
+	ctrl.reqRes.Allow = false
+	ctrl.resRes.Msg = unauthorizedMessage
+
+	daemonURL, err := url.Parse(d.Sock())
+	require.Nil(t, err)
+
+	conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10)
+	require.Nil(t, err)
+	client := httputil.NewClientConn(conn, nil)
+	req, err := http.NewRequest("GET", "/version", nil)
+	require.Nil(t, err)
+	resp, err := client.Do(req)
+
+	require.Nil(t, err)
+	require.Equal(t, http.StatusForbidden, resp.StatusCode)
+}
+
+func TestAuthZPluginDenyResponse(t *testing.T) {
+	defer setupTestV1(t)()
+	d.Start(t, "--authorization-plugin="+testAuthZPlugin)
+	ctrl.reqRes.Allow = true
+	ctrl.resRes.Allow = false
+	ctrl.resRes.Msg = unauthorizedMessage
+
+	client, err := d.NewClient()
+	require.Nil(t, err)
+
+	// Ensure command is blocked
+	_, err = client.ServerVersion(context.Background())
+	require.NotNil(t, err)
+	require.Equal(t, 1, ctrl.versionReqCount)
+	require.Equal(t, 1, ctrl.versionResCount)
+
+	// Ensure unauthorized message appears in response
+	require.Equal(t, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s", testAuthZPlugin, unauthorizedMessage), err.Error())
+}
+
+// TestAuthZPluginAllowEventStream verifies that the event stream propagates
+// correctly after requests pass through the authorization plugin
+func TestAuthZPluginAllowEventStream(t *testing.T) {
+	skip.IfCondition(t, testEnv.DaemonInfo.OSType != "linux")
+
+	defer setupTestV1(t)()
+	ctrl.reqRes.Allow = true
+	ctrl.resRes.Allow = true
+	d.StartWithBusybox(t, "--authorization-plugin="+testAuthZPlugin)
+
+	client, err := d.NewClient()
+	require.Nil(t, err)
+
+	startTime := strconv.FormatInt(systemTime(t, client, testEnv).Unix(), 10)
+	events, errs, cancel := systemEventsSince(client, startTime)
+	defer cancel()
+
+	// Create a container and wait for the creation events
+	createResponse, err := client.ContainerCreate(context.Background(), &container.Config{Cmd: []string{"top"}, Image: "busybox"}, &container.HostConfig{}, &networktypes.NetworkingConfig{}, "")
+	require.Nil(t, err)
+
+	err = client.ContainerStart(context.Background(), createResponse.ID, types.ContainerStartOptions{})
+	require.Nil(t, err)
+
+	for i := 0; i < 100; i++ {
+		c, err := client.ContainerInspect(context.Background(), createResponse.ID)
+		require.Nil(t, err)
+		if c.State.Running {
+			break
+		}
+		if i == 99 {
+			t.Fatal("Container didn't run within 10s")
+		}
+		time.Sleep(100 * time.Millisecond)
+	}
+
+	created := false
+	started := false
+	for !created || !started {
+		select {
+		case event := <-events:
+			if event.Type == eventtypes.ContainerEventType && event.Actor.ID == createResponse.ID {
+				if event.Action == "create" {
+					created = true
+				}
+				if event.Action == "start" {
+					started = true
+				}
+			}
+		case err := <-errs:
+			if err == io.EOF {
+				t.Fatal("premature end of event stream")
+			}
+			require.Nil(t, err)
+		case <-time.After(30 * time.Second):
+			// Fail the test
+			t.Fatal("event stream timeout")
+		}
+	}
+
+	// Ensure both events and container endpoints are passed to the
+	// authorization plugin
+	assertURIRecorded(t, ctrl.requestsURIs, "/events")
+	assertURIRecorded(t, ctrl.requestsURIs, "/containers/create")
+	assertURIRecorded(t, ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", createResponse.ID))
+}
+
+func systemTime(t *testing.T, client client.APIClient, testEnv *environment.Execution) time.Time {
+	if testEnv.IsLocalDaemon() {
+		return time.Now()
+	}
+
+	ctx := context.Background()
+	info, err := client.Info(ctx)
+	require.Nil(t, err)
+
+	dt, err := time.Parse(time.RFC3339Nano, info.SystemTime)
+	require.Nil(t, err, "invalid time format in GET /info response")
+	return dt
+}
+
+func systemEventsSince(client client.APIClient, since string) (<-chan eventtypes.Message, <-chan error, func()) {
+	eventOptions := types.EventsOptions{
+		Since: since,
+	}
+	ctx, cancel := context.WithCancel(context.Background())
+	events, errs := client.Events(ctx, eventOptions)
+
+	return events, errs, cancel
+}
+
+func TestAuthZPluginErrorResponse(t *testing.T) {
+	defer setupTestV1(t)()
+	d.Start(t, "--authorization-plugin="+testAuthZPlugin)
+	ctrl.reqRes.Allow = true
+	ctrl.resRes.Err = errorMessage
+
+	client, err := d.NewClient()
+	require.Nil(t, err)
+
+	// Ensure command is blocked
+	_, err = client.ServerVersion(context.Background())
+	require.NotNil(t, err)
+	require.Equal(t, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage), err.Error())
+}
+
+func TestAuthZPluginErrorRequest(t *testing.T) {
+	defer setupTestV1(t)()
+	d.Start(t, "--authorization-plugin="+testAuthZPlugin)
+	ctrl.reqRes.Err = errorMessage
+
+	client, err := d.NewClient()
+	require.Nil(t, err)
+
+	// Ensure command is blocked
+	_, err = client.ServerVersion(context.Background())
+	require.NotNil(t, err)
+	require.Equal(t, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage), err.Error())
+}
+
+func TestAuthZPluginEnsureNoDuplicatePluginRegistration(t *testing.T) {
+	defer setupTestV1(t)()
+	d.Start(t, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin)
+
+	ctrl.reqRes.Allow = true
+	ctrl.resRes.Allow = true
+
+	client, err := d.NewClient()
+	require.Nil(t, err)
+
+	_, err = client.ServerVersion(context.Background())
+	require.Nil(t, err)
+
+	// assert plugin is only called once.
+	require.Equal(t, 1, ctrl.versionReqCount)
+	require.Equal(t, 1, ctrl.versionResCount)
+}
+
+func TestAuthZPluginEnsureLoadImportWorking(t *testing.T) {
+	defer setupTestV1(t)()
+	ctrl.reqRes.Allow = true
+	ctrl.resRes.Allow = true
+	d.StartWithBusybox(t, "--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin)
+
+	client, err := d.NewClient()
+	require.Nil(t, err)
+
+	tmp, err := ioutil.TempDir("", "test-authz-load-import")
+	require.Nil(t, err)
+	defer os.RemoveAll(tmp)
+
+	savedImagePath := filepath.Join(tmp, "save.tar")
+
+	err = imageSave(client, savedImagePath, "busybox")
+	require.Nil(t, err)
+	err = imageLoad(client, savedImagePath)
+	require.Nil(t, err)
+
+	exportedImagePath := filepath.Join(tmp, "export.tar")
+
+	createResponse, err := client.ContainerCreate(context.Background(), &container.Config{Cmd: []string{}, Image: "busybox"}, &container.HostConfig{}, &networktypes.NetworkingConfig{}, "")
+	require.Nil(t, err)
+
+	err = client.ContainerStart(context.Background(), createResponse.ID, types.ContainerStartOptions{})
+	require.Nil(t, err)
+
+	responseReader, err := client.ContainerExport(context.Background(), createResponse.ID)
+	require.Nil(t, err)
+	defer responseReader.Close()
+	file, err := os.Create(exportedImagePath)
+	require.Nil(t, err)
+	defer file.Close()
+	_, err = io.Copy(file, responseReader)
+	require.Nil(t, err)
+
+	err = imageImport(client, exportedImagePath)
+	require.Nil(t, err)
+}
+
+func imageSave(client client.APIClient, path, image string) error {
+	ctx := context.Background()
+	responseReader, err := client.ImageSave(ctx, []string{image})
+	if err != nil {
+		return err
+	}
+	defer responseReader.Close()
+	file, err := os.Create(path)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	_, err = io.Copy(file, responseReader)
+	return err
+}
+
+func imageLoad(client client.APIClient, path string) error {
+	file, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	quiet := true
+	ctx := context.Background()
+	response, err := client.ImageLoad(ctx, file, quiet)
+	if err != nil {
+		return err
+	}
+	defer response.Body.Close()
+	return nil
+}
+
+func imageImport(client client.APIClient, path string) error {
+	file, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	options := types.ImageImportOptions{}
+	ref := ""
+	source := types.ImageImportSource{
+		Source:     file,
+		SourceName: "-",
+	}
+	ctx := context.Background()
+	responseReader, err := client.ImageImport(ctx, source, ref, options)
+	if err != nil {
+		return err
+	}
+	defer responseReader.Close()
+	return nil
+}
+
+func TestAuthZPluginHeader(t *testing.T) {
+	defer setupTestV1(t)()
+	ctrl.reqRes.Allow = true
+	ctrl.resRes.Allow = true
+	d.StartWithBusybox(t, "--debug", "--authorization-plugin="+testAuthZPlugin)
+
+	daemonURL, err := url.Parse(d.Sock())
+	require.Nil(t, err)
+
+	conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10)
+	require.Nil(t, err)
+	client := httputil.NewClientConn(conn, nil)
+	req, err := http.NewRequest("GET", "/version", nil)
+	require.Nil(t, err)
+	resp, err := client.Do(req)
+	require.Nil(t, err)
+	require.Equal(t, "application/json", resp.Header["Content-Type"][0])
+}
+
+// assertURIRecorded verifies that the given URI was sent and recorded
+// in the authz plugin
+func assertURIRecorded(t *testing.T, uris []string, uri string) {
+	var found bool
+	for _, u := range uris {
+		if strings.Contains(u, uri) {
+			found = true
+			break
+		}
+	}
+	if !found {
+		t.Fatalf("Expected to find URI '%s', recorded uris '%s'", uri, strings.Join(uris, ","))
+	}
+}
diff --git a/integration/plugin/authz/authz_plugin_v2_test.go b/integration/plugin/authz/authz_plugin_v2_test.go
new file mode 100644
index 0000000..d6c0498
--- /dev/null
+++ b/integration/plugin/authz/authz_plugin_v2_test.go
@@ -0,0 +1,178 @@
+// +build !windows
+
+package authz
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/docker/api/types/filters"
+	networktypes "github.com/docker/docker/api/types/network"
+	volumetypes "github.com/docker/docker/api/types/volume"
+	"github.com/docker/docker/client"
+	"github.com/docker/docker/integration/util/requirement"
+	"github.com/gotestyourself/gotestyourself/skip"
+	"github.com/stretchr/testify/require"
+)
+
+var (
+	authzPluginName            = "riyaz/authz-no-volume-plugin"
+	authzPluginTag             = "latest"
+	authzPluginNameWithTag     = authzPluginName + ":" + authzPluginTag
+	authzPluginBadManifestName = "riyaz/authz-plugin-bad-manifest"
+	nonexistentAuthzPluginName = "riyaz/nonexistent-authz-plugin"
+)
+
+func setupTestV2(t *testing.T) func() {
+	skip.IfCondition(t, testEnv.DaemonInfo.OSType != "linux")
+	skip.IfCondition(t, !requirement.HasHubConnectivity(t))
+
+	teardown := setupTest(t)
+
+	d.Start(t)
+
+	return teardown
+}
+
+func TestAuthZPluginV2AllowNonVolumeRequest(t *testing.T) {
+	skip.IfCondition(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64")
+	defer setupTestV2(t)()
+
+	client, err := d.NewClient()
+	require.Nil(t, err)
+
+	// Install authz plugin
+	err = pluginInstallGrantAllPermissions(client, authzPluginNameWithTag)
+	require.Nil(t, err)
+	// start the daemon with the plugin and load busybox, --net=none build fails otherwise
+	// because it needs to pull busybox
+	d.Restart(t, "--authorization-plugin="+authzPluginNameWithTag)
+	d.LoadBusybox(t)
+
+	// Ensure docker run command and accompanying docker ps are successful
+	createResponse, err := client.ContainerCreate(context.Background(), &container.Config{Cmd: []string{"top"}, Image: "busybox"}, &container.HostConfig{}, &networktypes.NetworkingConfig{}, "")
+	require.Nil(t, err)
+
+	err = client.ContainerStart(context.Background(), createResponse.ID, types.ContainerStartOptions{})
+	require.Nil(t, err)
+
+	_, err = client.ContainerInspect(context.Background(), createResponse.ID)
+	require.Nil(t, err)
+}
+
+func TestAuthZPluginV2Disable(t *testing.T) {
+	skip.IfCondition(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64")
+	defer setupTestV2(t)()
+
+	client, err := d.NewClient()
+	require.Nil(t, err)
+
+	// Install authz plugin
+	err = pluginInstallGrantAllPermissions(client, authzPluginNameWithTag)
+	require.Nil(t, err)
+
+	d.Restart(t, "--authorization-plugin="+authzPluginNameWithTag)
+	d.LoadBusybox(t)
+
+	_, err = client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{Driver: "local"})
+	require.NotNil(t, err)
+	require.True(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)))
+
+	// disable the plugin
+	err = client.PluginDisable(context.Background(), authzPluginNameWithTag, types.PluginDisableOptions{})
+	require.Nil(t, err)
+
+	// now test to see if the docker api works.
+	_, err = client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{Driver: "local"})
+	require.Nil(t, err)
+}
+
+func TestAuthZPluginV2RejectVolumeRequests(t *testing.T) {
+	skip.IfCondition(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64")
+	defer setupTestV2(t)()
+
+	client, err := d.NewClient()
+	require.Nil(t, err)
+
+	// Install authz plugin
+	err = pluginInstallGrantAllPermissions(client, authzPluginNameWithTag)
+	require.Nil(t, err)
+
+	// restart the daemon with the plugin
+	d.Restart(t, "--authorization-plugin="+authzPluginNameWithTag)
+
+	_, err = client.VolumeCreate(context.Background(), volumetypes.VolumesCreateBody{Driver: "local"})
+	require.NotNil(t, err)
+	require.True(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)))
+
+	_, err = client.VolumeList(context.Background(), filters.Args{})
+	require.NotNil(t, err)
+	require.True(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)))
+
+	// The plugin will block the command before it can determine the volume does not exist
+	err = client.VolumeRemove(context.Background(), "test", false)
+	require.NotNil(t, err)
+	require.True(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)))
+
+	_, err = client.VolumeInspect(context.Background(), "test")
+	require.NotNil(t, err)
+	require.True(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)))
+
+	_, err = client.VolumesPrune(context.Background(), filters.Args{})
+	require.NotNil(t, err)
+	require.True(t, strings.Contains(err.Error(), fmt.Sprintf("Error response from daemon: plugin %s failed with error:", authzPluginNameWithTag)))
+}
+
+func TestAuthZPluginV2BadManifestFailsDaemonStart(t *testing.T) {
+	skip.IfCondition(t, os.Getenv("DOCKER_ENGINE_GOARCH") != "amd64")
+	defer setupTestV2(t)()
+
+	client, err := d.NewClient()
+	require.Nil(t, err)
+
+	// Install authz plugin with bad manifest
+	err = pluginInstallGrantAllPermissions(client, authzPluginBadManifestName)
+	require.Nil(t, err)
+
+	// start the daemon with the plugin, it will error
+	err = d.RestartWithError("--authorization-plugin=" + authzPluginBadManifestName)
+	require.NotNil(t, err)
+
+	// restarting the daemon without requiring the plugin will succeed
+	d.Start(t)
+}
+
+func TestAuthZPluginV2NonexistentFailsDaemonStart(t *testing.T) {
+	defer setupTestV2(t)()
+
+	// start the daemon with a non-existent authz plugin, it will error
+	err := d.RestartWithError("--authorization-plugin=" + nonexistentAuthzPluginName)
+	require.NotNil(t, err)
+
+	// restarting the daemon without requiring the plugin will succeed
+	d.Start(t)
+}
+
+func pluginInstallGrantAllPermissions(client client.APIClient, name string) error {
+	ctx := context.Background()
+	options := types.PluginInstallOptions{
+		RemoteRef:            name,
+		AcceptAllPermissions: true,
+	}
+	responseReader, err := client.PluginInstall(ctx, "", options)
+	if err != nil {
+		return err
+	}
+	defer responseReader.Close()
+	// we have to read the response out here because the client API
+	// actually starts a goroutine which we can only be sure has
+	// completed when we get EOF from reading responseBody
+	_, err = ioutil.ReadAll(responseReader)
+	return err
+}
diff --git a/integration/plugin/authz/main_test.go b/integration/plugin/authz/main_test.go
new file mode 100644
index 0000000..0ecf66c
--- /dev/null
+++ b/integration/plugin/authz/main_test.go
@@ -0,0 +1,182 @@
+// +build !windows
+
+package authz
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/http/httptest"
+	"os"
+	"strings"
+	"testing"
+
+	"github.com/docker/docker/integration-cli/daemon"
+	"github.com/docker/docker/internal/test/environment"
+	"github.com/docker/docker/pkg/authorization"
+	"github.com/docker/docker/pkg/plugins"
+)
+
+var (
+	testEnv *environment.Execution
+	d       *daemon.Daemon
+	server  *httptest.Server
+)
+
+const dockerdBinary = "dockerd"
+
+func TestMain(m *testing.M) {
+	var err error
+	testEnv, err = environment.New()
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	err = environment.EnsureFrozenImagesLinux(testEnv)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+	testEnv.Print()
+	setupSuite()
+	exitCode := m.Run()
+	teardownSuite()
+
+	os.Exit(exitCode)
+}
+
+func setupTest(t *testing.T) func() {
+	environment.ProtectAll(t, testEnv)
+
+	d = daemon.New(t, "", dockerdBinary, daemon.Config{
+		Experimental: testEnv.DaemonInfo.ExperimentalBuild,
+	})
+
+	return func() {
+		if d != nil {
+			d.Stop(t)
+		}
+		testEnv.Clean(t)
+	}
+}
+
+func setupSuite() {
+	mux := http.NewServeMux()
+	server = httptest.NewServer(mux)
+
+	mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
+		b, err := json.Marshal(plugins.Manifest{Implements: []string{authorization.AuthZApiImplements}})
+		if err != nil {
+			panic("could not marshal json for /Plugin.Activate: " + err.Error())
+		}
+		w.Write(b)
+	})
+
+	mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+		body, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			panic("could not read body for /AuthZPlugin.AuthZReq: " + err.Error())
+		}
+		authReq := authorization.Request{}
+		err = json.Unmarshal(body, &authReq)
+		if err != nil {
+			panic("could not unmarshal json for /AuthZPlugin.AuthZReq: " + err.Error())
+		}
+
+		assertBody(authReq.RequestURI, authReq.RequestHeaders, authReq.RequestBody)
+		assertAuthHeaders(authReq.RequestHeaders)
+
+		// Count only server version api
+		if strings.HasSuffix(authReq.RequestURI, serverVersionAPI) {
+			ctrl.versionReqCount++
+		}
+
+		ctrl.requestsURIs = append(ctrl.requestsURIs, authReq.RequestURI)
+
+		reqRes := ctrl.reqRes
+		if isAllowed(authReq.RequestURI) {
+			reqRes = authorization.Response{Allow: true}
+		}
+		if reqRes.Err != "" {
+			w.WriteHeader(http.StatusInternalServerError)
+		}
+		b, err := json.Marshal(reqRes)
+		if err != nil {
+			panic("could not marshal json for /AuthZPlugin.AuthZReq: " + err.Error())
+		}
+
+		ctrl.reqUser = authReq.User
+		w.Write(b)
+	})
+
+	mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) {
+		defer r.Body.Close()
+		body, err := ioutil.ReadAll(r.Body)
+		if err != nil {
+			panic("could not read body for /AuthZPlugin.AuthZRes: " + err.Error())
+		}
+		authReq := authorization.Request{}
+		err = json.Unmarshal(body, &authReq)
+		if err != nil {
+			panic("could not unmarshal json for /AuthZPlugin.AuthZRes: " + err.Error())
+		}
+
+		assertBody(authReq.RequestURI, authReq.ResponseHeaders, authReq.ResponseBody)
+		assertAuthHeaders(authReq.ResponseHeaders)
+
+		// Count only server version api
+		if strings.HasSuffix(authReq.RequestURI, serverVersionAPI) {
+			ctrl.versionResCount++
+		}
+		resRes := ctrl.resRes
+		if isAllowed(authReq.RequestURI) {
+			resRes = authorization.Response{Allow: true}
+		}
+		if resRes.Err != "" {
+			w.WriteHeader(http.StatusInternalServerError)
+		}
+		b, err := json.Marshal(resRes)
+		if err != nil {
+			panic("could not marshal json for /AuthZPlugin.AuthZRes: " + err.Error())
+		}
+		ctrl.resUser = authReq.User
+		w.Write(b)
+	})
+}
+
+func teardownSuite() {
+	if server == nil {
+		return
+	}
+
+	server.Close()
+}
+
+// assertAuthHeaders panics if any authentication or registry headers are still present; it always returns nil
+func assertAuthHeaders(headers map[string]string) error {
+	for k := range headers {
+		if strings.Contains(strings.ToLower(k), "auth") || strings.Contains(strings.ToLower(k), "x-registry") {
+			panic(fmt.Sprintf("Found authentication headers in request '%v'", headers))
+		}
+	}
+	return nil
+}
+
+// assertBody asserts that body is removed for non text/json requests
+func assertBody(requestURI string, headers map[string]string, body []byte) {
+	if strings.Contains(strings.ToLower(requestURI), "auth") && len(body) > 0 {
+		panic("Body included for authentication endpoint " + string(body))
+	}
+
+	for k, v := range headers {
+		if strings.EqualFold(k, "Content-Type") && (strings.HasPrefix(v, "text/") || v == "application/json") {
+			return
+		}
+	}
+	if len(body) > 0 {
+		panic(fmt.Sprintf("Body included while it should not be (Headers: '%v')", headers))
+	}
+}
diff --git a/integration/plugin/pkg_test.go b/integration/plugin/pkg_test.go
new file mode 100644
index 0000000..b0736c3
--- /dev/null
+++ b/integration/plugin/pkg_test.go
@@ -0,0 +1 @@
+package plugin
diff --git a/integration/plugin/volume/cmd/cmd_test.go b/integration/plugin/volume/cmd/cmd_test.go
new file mode 100644
index 0000000..1d619dd
--- /dev/null
+++ b/integration/plugin/volume/cmd/cmd_test.go
@@ -0,0 +1 @@
+package cmd
diff --git a/integration/plugin/volume/cmd/create-error/main.go b/integration/plugin/volume/cmd/create-error/main.go
new file mode 100644
index 0000000..f23be51
--- /dev/null
+++ b/integration/plugin/volume/cmd/create-error/main.go
@@ -0,0 +1,23 @@
+package main
+
+import (
+	"net"
+	"net/http"
+)
+
+func main() {
+	l, err := net.Listen("unix", "/run/docker/plugins/plugin.sock")
+	if err != nil {
+		panic(err)
+	}
+
+	mux := http.NewServeMux()
+	server := http.Server{
+		Addr:    l.Addr().String(),
+		Handler: mux,
+	}
+	mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
+		http.Error(w, "error during create", http.StatusInternalServerError)
+	})
+	server.Serve(l)
+}
diff --git a/integration/plugin/volume/cmd/create-error/main_test.go b/integration/plugin/volume/cmd/create-error/main_test.go
new file mode 100644
index 0000000..06ab7d0
--- /dev/null
+++ b/integration/plugin/volume/cmd/create-error/main_test.go
@@ -0,0 +1 @@
+package main
diff --git a/integration/plugin/volume/create_test.go b/integration/plugin/volume/create_test.go
new file mode 100644
index 0000000..ce9b4dc
--- /dev/null
+++ b/integration/plugin/volume/create_test.go
@@ -0,0 +1,51 @@
+// +build linux
+
+package volume
+
+import (
+	"context"
+	"testing"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/volume"
+	"github.com/docker/docker/integration-cli/daemon"
+)
+
+// TestCreateDerefOnError ensures that if a volume create fails, that the plugin is dereferenced
+// Normally 1 volume == 1 reference to a plugin, which prevents a plugin from being removed.
+// If the volume create fails, we should make sure to dereference the plugin.
+func TestCreateDerefOnError(t *testing.T) {
+	t.Parallel()
+
+	d := daemon.New(t, "", dockerdBinary, daemon.Config{})
+	d.Start(t)
+	defer d.Stop(t)
+
+	c, err := d.NewClient()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	pName := "testderef"
+	createPlugin(t, c, pName, "create-error", asVolumeDriver)
+
+	if err := c.PluginEnable(context.Background(), pName, types.PluginEnableOptions{Timeout: 30}); err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = c.VolumeCreate(context.Background(), volume.VolumesCreateBody{
+		Driver: pName,
+		Name:   "fake",
+	})
+	if err == nil {
+		t.Fatal("volume create should have failed")
+	}
+
+	if err := c.PluginDisable(context.Background(), pName, types.PluginDisableOptions{}); err != nil {
+		t.Fatal(err)
+	}
+
+	if err := c.PluginRemove(context.Background(), pName, types.PluginRemoveOptions{}); err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/integration/plugin/volume/main_test.go b/integration/plugin/volume/main_test.go
new file mode 100644
index 0000000..8cfbf37
--- /dev/null
+++ b/integration/plugin/volume/main_test.go
@@ -0,0 +1,69 @@
+package volume
+
+import (
+	"context"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/integration-cli/fixtures/plugin"
+	"github.com/docker/docker/pkg/locker"
+	"github.com/pkg/errors"
+)
+
+const dockerdBinary = "dockerd"
+
+var pluginBuildLock = locker.New()
+
+func ensurePlugin(t *testing.T, name string) string {
+	pluginBuildLock.Lock(name)
+	defer pluginBuildLock.Unlock(name)
+
+	installPath := filepath.Join(os.Getenv("GOPATH"), "bin", name)
+	if _, err := os.Stat(installPath); err == nil {
+		return installPath
+	}
+
+	goBin, err := exec.LookPath("go")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	cmd := exec.Command(goBin, "build", "-o", installPath, "./"+filepath.Join("cmd", name))
+	cmd.Env = append(os.Environ(), "CGO_ENABLED=0")
+	if out, err := cmd.CombinedOutput(); err != nil {
+		t.Fatal(errors.Wrapf(err, "error building basic plugin bin: %s", string(out)))
+	}
+
+	return installPath
+}
+
+func asVolumeDriver(cfg *plugin.Config) {
+	cfg.Interface.Types = []types.PluginInterfaceType{
+		{Capability: "volumedriver", Prefix: "docker", Version: "1.0"},
+	}
+}
+
+func withSockPath(name string) func(*plugin.Config) {
+	return func(cfg *plugin.Config) {
+		cfg.Interface.Socket = name
+	}
+}
+
+func createPlugin(t *testing.T, client plugin.CreateClient, alias, bin string, opts ...plugin.CreateOpt) {
+	pluginBin := ensurePlugin(t, bin)
+
+	opts = append(opts, withSockPath("plugin.sock"))
+	opts = append(opts, plugin.WithBinary(pluginBin))
+
+	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
+	err := plugin.Create(ctx, client, alias, opts...)
+	cancel()
+
+	if err != nil {
+		t.Fatal(err)
+	}
+}
diff --git a/integration/service/create_test.go b/integration/service/create_test.go
new file mode 100644
index 0000000..e94185a
--- /dev/null
+++ b/integration/service/create_test.go
@@ -0,0 +1,137 @@
+package service
+
+import (
+	"runtime"
+	"testing"
+	"time"
+
+	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/filters"
+	"github.com/docker/docker/api/types/swarm"
+	"github.com/docker/docker/client"
+	"github.com/docker/docker/integration-cli/request"
+	"github.com/gotestyourself/gotestyourself/poll"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/net/context"
+)
+
+func TestCreateWithLBSandbox(t *testing.T) {
+	defer setupTest(t)()
+	d := newSwarm(t)
+	defer d.Stop(t)
+	client, err := request.NewClientForHost(d.Sock())
+	require.NoError(t, err)
+
+	overlayName := "overlay1"
+	networkCreate := types.NetworkCreate{
+		CheckDuplicate: true,
+		Driver:         "overlay",
+	}
+
+	netResp, err := client.NetworkCreate(context.Background(), overlayName, networkCreate)
+	require.NoError(t, err)
+	overlayID := netResp.ID
+
+	var instances uint64 = 1
+	serviceSpec := swarmServiceSpec("TestService", instances)
+
+	serviceSpec.TaskTemplate.Networks = append(serviceSpec.TaskTemplate.Networks, swarm.NetworkAttachmentConfig{Target: overlayName})
+
+	serviceResp, err := client.ServiceCreate(context.Background(), serviceSpec, types.ServiceCreateOptions{
+		QueryRegistry: false,
+	})
+	require.NoError(t, err)
+
+	pollSettings := func(config *poll.Settings) {
+		if runtime.GOARCH == "arm" {
+			config.Timeout = 30 * time.Second
+			config.Delay = 100 * time.Millisecond
+		}
+	}
+
+	serviceID := serviceResp.ID
+	poll.WaitOn(t, serviceRunningTasksCount(client, serviceID, instances), pollSettings)
+
+	_, _, err = client.ServiceInspectWithRaw(context.Background(), serviceID, types.ServiceInspectOptions{})
+	require.NoError(t, err)
+
+	network, err := client.NetworkInspect(context.Background(), overlayID, types.NetworkInspectOptions{})
+	require.NoError(t, err)
+	assert.Contains(t, network.Containers, overlayName+"-sbox")
+
+	err = client.ServiceRemove(context.Background(), serviceID)
+	require.NoError(t, err)
+
+	poll.WaitOn(t, serviceIsRemoved(client, serviceID), pollSettings)
+	err = client.NetworkRemove(context.Background(), overlayID)
+	require.NoError(t, err)
+
+	poll.WaitOn(t, networkIsRemoved(client, overlayID), poll.WithTimeout(1*time.Minute), poll.WithDelay(10*time.Second))
+}
+
+func swarmServiceSpec(name string, replicas uint64) swarm.ServiceSpec {
+	return swarm.ServiceSpec{
+		Annotations: swarm.Annotations{
+			Name: name,
+		},
+		TaskTemplate: swarm.TaskSpec{
+			ContainerSpec: &swarm.ContainerSpec{
+				Image:   "busybox:latest",
+				Command: []string{"/bin/top"},
+			},
+		},
+		Mode: swarm.ServiceMode{
+			Replicated: &swarm.ReplicatedService{
+				Replicas: &replicas,
+			},
+		},
+	}
+}
+
+func serviceRunningTasksCount(client client.ServiceAPIClient, serviceID string, instances uint64) func(log poll.LogT) poll.Result {
+	return func(log poll.LogT) poll.Result {
+		filter := filters.NewArgs()
+		filter.Add("service", serviceID)
+		tasks, err := client.TaskList(context.Background(), types.TaskListOptions{
+			Filters: filter,
+		})
+		switch {
+		case err != nil:
+			return poll.Error(err)
+		case len(tasks) == int(instances):
+			for _, task := range tasks {
+				if task.Status.State != swarm.TaskStateRunning {
+					return poll.Continue("waiting for tasks to enter run state")
+				}
+			}
+			return poll.Success()
+		default:
+			return poll.Continue("task count at %d waiting for %d", len(tasks), instances)
+		}
+	}
+}
+
+func serviceIsRemoved(client client.ServiceAPIClient, serviceID string) func(log poll.LogT) poll.Result {
+	return func(log poll.LogT) poll.Result {
+		filter := filters.NewArgs()
+		filter.Add("service", serviceID)
+		_, err := client.TaskList(context.Background(), types.TaskListOptions{
+			Filters: filter,
+		})
+		if err == nil {
+			return poll.Continue("waiting for service %s to be deleted", serviceID)
+		}
+		return poll.Success()
+	}
+}
+
+func networkIsRemoved(client client.NetworkAPIClient, networkID string) func(log poll.LogT) poll.Result {
+	return func(log poll.LogT) poll.Result {
+		_, err := client.NetworkInspect(context.Background(), networkID, types.NetworkInspectOptions{})
+		if err == nil {
+			return poll.Continue("waiting for network %s to be removed", networkID)
+		}
+		return poll.Success()
+	}
+}
diff --git a/integration/service/inspect_test.go b/integration/service/inspect_test.go
index 239075a..5129aa2 100644
--- a/integration/service/inspect_test.go
+++ b/integration/service/inspect_test.go
@@ -6,18 +6,21 @@
 	"time"
 
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/filters"
 	"github.com/docker/docker/api/types/swarm"
 	"github.com/docker/docker/client"
 	"github.com/docker/docker/integration-cli/daemon"
 	"github.com/docker/docker/integration-cli/request"
 	"github.com/gotestyourself/gotestyourself/poll"
+	"github.com/gotestyourself/gotestyourself/skip"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/net/context"
 )
 
 func TestInspect(t *testing.T) {
+	skip.IfCondition(t, !testEnv.IsLocalDaemon())
 	defer setupTest(t)()
 	d := newSwarm(t)
 	defer d.Stop(t)
@@ -74,6 +77,7 @@
 					Nameservers: []string{"8.8.8.8"},
 					Search:      []string{"somedomain"},
 				},
+				Isolation: container.IsolationDefault,
 			},
 			RestartPolicy: &swarm.RestartPolicy{
 				Delay:       &restartDelay,
diff --git a/integration/service/main_test.go b/integration/service/main_test.go
index 4d6af81..6dfb6b9 100644
--- a/integration/service/main_test.go
+++ b/integration/service/main_test.go
@@ -19,12 +19,17 @@
 		fmt.Println(err)
 		os.Exit(1)
 	}
+	err = environment.EnsureFrozenImagesLinux(testEnv)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
 
 	testEnv.Print()
 	os.Exit(m.Run())
 }
 
 func setupTest(t *testing.T) func() {
-	environment.ProtectImages(t, testEnv)
+	environment.ProtectAll(t, testEnv)
 	return func() { testEnv.Clean(t) }
 }
diff --git a/integration/system/main_test.go b/integration/system/main_test.go
new file mode 100644
index 0000000..aa332c9
--- /dev/null
+++ b/integration/system/main_test.go
@@ -0,0 +1,33 @@
+package system
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/docker/docker/internal/test/environment"
+)
+
+var testEnv *environment.Execution
+
+func TestMain(m *testing.M) {
+	var err error
+	testEnv, err = environment.New()
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+	err = environment.EnsureFrozenImagesLinux(testEnv)
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+	testEnv.Print()
+	os.Exit(m.Run())
+}
+
+func setupTest(t *testing.T) func() {
+	environment.ProtectAll(t, testEnv)
+	return func() { testEnv.Clean(t) }
+}
diff --git a/integration/system/version_test.go b/integration/system/version_test.go
new file mode 100644
index 0000000..c1231fd
--- /dev/null
+++ b/integration/system/version_test.go
@@ -0,0 +1,23 @@
+package system
+
+import (
+	"testing"
+
+	"github.com/docker/docker/integration/util/request"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	"golang.org/x/net/context"
+)
+
+func TestVersion(t *testing.T) {
+	client := request.NewAPIClient(t)
+
+	version, err := client.ServerVersion(context.Background())
+	require.NoError(t, err)
+
+	assert.NotNil(t, version.APIVersion)
+	assert.NotNil(t, version.Version)
+	assert.NotNil(t, version.MinAPIVersion)
+	assert.Equal(t, testEnv.DaemonInfo.ExperimentalBuild, version.Experimental)
+	assert.Equal(t, testEnv.OSType, version.Os)
+}
diff --git a/integration-cli/fixtures/https/ca.pem b/integration/testdata/https/ca.pem
similarity index 100%
rename from integration-cli/fixtures/https/ca.pem
rename to integration/testdata/https/ca.pem
diff --git a/integration-cli/fixtures/https/client-cert.pem b/integration/testdata/https/client-cert.pem
similarity index 100%
rename from integration-cli/fixtures/https/client-cert.pem
rename to integration/testdata/https/client-cert.pem
diff --git a/integration-cli/fixtures/https/client-key.pem b/integration/testdata/https/client-key.pem
similarity index 100%
rename from integration-cli/fixtures/https/client-key.pem
rename to integration/testdata/https/client-key.pem
diff --git a/integration-cli/fixtures/https/server-cert.pem b/integration/testdata/https/server-cert.pem
similarity index 100%
rename from integration-cli/fixtures/https/server-cert.pem
rename to integration/testdata/https/server-cert.pem
diff --git a/integration-cli/fixtures/https/server-key.pem b/integration/testdata/https/server-key.pem
similarity index 100%
rename from integration-cli/fixtures/https/server-key.pem
rename to integration/testdata/https/server-key.pem
diff --git a/integration/util/request/client.go b/integration/util/request/client.go
index 7715636..b35306c 100644
--- a/integration/util/request/client.go
+++ b/integration/util/request/client.go
@@ -1,9 +1,15 @@
 package request
 
 import (
+	"net"
+	"net/http"
 	"testing"
+	"time"
 
+	"github.com/docker/docker/api"
 	"github.com/docker/docker/client"
+	"github.com/docker/go-connections/sockets"
+	"github.com/docker/go-connections/tlsconfig"
 	"github.com/stretchr/testify/require"
 )
 
@@ -13,3 +19,35 @@
 	require.NoError(t, err)
 	return clt
 }
+
+// NewTLSAPIClient returns a docker API client configured with the
+// provided TLS settings
+func NewTLSAPIClient(t *testing.T, host, cacertPath, certPath, keyPath string) (client.APIClient, error) {
+	opts := tlsconfig.Options{
+		CAFile:             cacertPath,
+		CertFile:           certPath,
+		KeyFile:            keyPath,
+		ExclusiveRootPools: true,
+	}
+	config, err := tlsconfig.Client(opts)
+	require.Nil(t, err)
+	tr := &http.Transport{
+		TLSClientConfig: config,
+		DialContext: (&net.Dialer{
+			KeepAlive: 30 * time.Second,
+			Timeout:   30 * time.Second,
+		}).DialContext,
+	}
+	proto, addr, _, err := client.ParseHost(host)
+	require.Nil(t, err)
+
+	sockets.ConfigureTransport(tr, proto, addr)
+
+	httpClient := &http.Client{
+		Transport:     tr,
+		CheckRedirect: client.CheckRedirect,
+	}
+	verStr := api.DefaultVersion
+	customHeaders := map[string]string{}
+	return client.NewClient(host, verStr, httpClient, customHeaders)
+}
diff --git a/integration/util/requirement/requirement.go b/integration/util/requirement/requirement.go
new file mode 100644
index 0000000..936b2dd
--- /dev/null
+++ b/integration/util/requirement/requirement.go
@@ -0,0 +1,26 @@
+package requirement
+
+import (
+	"net/http"
+	"strings"
+	"testing"
+	"time"
+)
+
+// HasHubConnectivity checks to see if https://hub.docker.com is
+// accessible from the present environment
+func HasHubConnectivity(t *testing.T) bool {
+	// Set a timeout on the GET at 15s
+	var timeout = 15 * time.Second
+	var url = "https://hub.docker.com"
+
+	client := http.Client{Timeout: timeout}
+	resp, err := client.Get(url)
+	if err != nil && strings.Contains(err.Error(), "use of closed network connection") {
+		t.Fatalf("Timeout for GET request on %s", url)
+	}
+	if resp != nil {
+		resp.Body.Close()
+	}
+	return err == nil
+}
diff --git a/internal/test/environment/clean.go b/internal/test/environment/clean.go
index c6392dc..e9548d1 100644
--- a/internal/test/environment/clean.go
+++ b/internal/test/environment/clean.go
@@ -28,16 +28,16 @@
 func (e *Execution) Clean(t testingT) {
 	client := e.APIClient()
 
-	platform := e.DaemonInfo.OSType
+	platform := e.OSType
 	if (platform != "windows") || (platform == "windows" && e.DaemonInfo.Isolation == "hyperv") {
 		unpauseAllContainers(t, client)
 	}
-	deleteAllContainers(t, client)
+	deleteAllContainers(t, client, e.protectedElements.containers)
 	deleteAllImages(t, client, e.protectedElements.images)
-	deleteAllVolumes(t, client)
-	deleteAllNetworks(t, client, platform)
+	deleteAllVolumes(t, client, e.protectedElements.volumes)
+	deleteAllNetworks(t, client, platform, e.protectedElements.networks)
 	if platform == "linux" {
-		deleteAllPlugins(t, client)
+		deleteAllPlugins(t, client, e.protectedElements.plugins)
 	}
 }
 
@@ -66,7 +66,7 @@
 
 var alreadyExists = regexp.MustCompile(`Error response from daemon: removal of container (\w+) is already in progress`)
 
-func deleteAllContainers(t assert.TestingT, apiclient client.ContainerAPIClient) {
+func deleteAllContainers(t assert.TestingT, apiclient client.ContainerAPIClient, protectedContainers map[string]struct{}) {
 	ctx := context.Background()
 	containers := getAllContainers(ctx, t, apiclient)
 	if len(containers) == 0 {
@@ -74,11 +74,14 @@
 	}
 
 	for _, container := range containers {
+		if _, ok := protectedContainers[container.ID]; ok {
+			continue
+		}
 		err := apiclient.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{
 			Force:         true,
 			RemoveVolumes: true,
 		})
-		if err == nil || client.IsErrNotFound(err) || alreadyExists.MatchString(err.Error()) {
+		if err == nil || client.IsErrNotFound(err) || alreadyExists.MatchString(err.Error()) || isErrNotFoundSwarmClassic(err) {
 			continue
 		}
 		assert.NoError(t, err, "failed to remove %s", container.ID)
@@ -126,17 +129,24 @@
 	assert.NoError(t, err, "failed to remove image %s", ref)
 }
 
-func deleteAllVolumes(t assert.TestingT, c client.VolumeAPIClient) {
+func deleteAllVolumes(t assert.TestingT, c client.VolumeAPIClient, protectedVolumes map[string]struct{}) {
 	volumes, err := c.VolumeList(context.Background(), filters.Args{})
 	assert.NoError(t, err, "failed to list volumes")
 
 	for _, v := range volumes.Volumes {
+		if _, ok := protectedVolumes[v.Name]; ok {
+			continue
+		}
 		err := c.VolumeRemove(context.Background(), v.Name, true)
+		// Docker EE may list volumes that no longer exist.
+		if isErrNotFoundSwarmClassic(err) {
+			continue
+		}
 		assert.NoError(t, err, "failed to remove volume %s", v.Name)
 	}
 }
 
-func deleteAllNetworks(t assert.TestingT, c client.NetworkAPIClient, daemonPlatform string) {
+func deleteAllNetworks(t assert.TestingT, c client.NetworkAPIClient, daemonPlatform string, protectedNetworks map[string]struct{}) {
 	networks, err := c.NetworkList(context.Background(), types.NetworkListOptions{})
 	assert.NoError(t, err, "failed to list networks")
 
@@ -144,6 +154,9 @@
 		if n.Name == "bridge" || n.Name == "none" || n.Name == "host" {
 			continue
 		}
+		if _, ok := protectedNetworks[n.ID]; ok {
+			continue
+		}
 		if daemonPlatform == "windows" && strings.ToLower(n.Name) == "nat" {
 			// nat is a pre-defined network on Windows and cannot be removed
 			continue
@@ -153,12 +166,25 @@
 	}
 }
 
-func deleteAllPlugins(t assert.TestingT, c client.PluginAPIClient) {
+func deleteAllPlugins(t assert.TestingT, c client.PluginAPIClient, protectedPlugins map[string]struct{}) {
 	plugins, err := c.PluginList(context.Background(), filters.Args{})
+	// Docker EE does not allow cluster-wide plugin management.
+	if client.IsErrNotImplemented(err) {
+		return
+	}
 	assert.NoError(t, err, "failed to list plugins")
 
 	for _, p := range plugins {
+		if _, ok := protectedPlugins[p.Name]; ok {
+			continue
+		}
 		err := c.PluginRemove(context.Background(), p.Name, types.PluginRemoveOptions{Force: true})
 		assert.NoError(t, err, "failed to remove plugin %s", p.ID)
 	}
 }
+
+// Swarm classic aggregates node errors and returns a 500 so we need to check
+// the error string instead of just IsErrNotFound().
+func isErrNotFoundSwarmClassic(err error) bool {
+	return err != nil && strings.Contains(strings.ToLower(err.Error()), "no such")
+}
diff --git a/internal/test/environment/environment.go b/internal/test/environment/environment.go
index afe8929..fb92727 100644
--- a/internal/test/environment/environment.go
+++ b/internal/test/environment/environment.go
@@ -8,6 +8,7 @@
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
+	"github.com/docker/docker/integration-cli/fixtures/load"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 )
@@ -17,6 +18,7 @@
 type Execution struct {
 	client            client.APIClient
 	DaemonInfo        types.Info
+	OSType            string
 	PlatformDefaults  PlatformDefaults
 	protectedElements protectedElements
 }
@@ -40,19 +42,31 @@
 		return nil, errors.Wrapf(err, "failed to get info from daemon")
 	}
 
+	osType := getOSType(info)
+
 	return &Execution{
 		client:            client,
 		DaemonInfo:        info,
-		PlatformDefaults:  getPlatformDefaults(info),
+		OSType:            osType,
+		PlatformDefaults:  getPlatformDefaults(info, osType),
 		protectedElements: newProtectedElements(),
 	}, nil
 }
 
-func getPlatformDefaults(info types.Info) PlatformDefaults {
+func getOSType(info types.Info) string {
+	// Docker EE does not set the OSType so allow the user to override this value.
+	userOsType := os.Getenv("TEST_OSTYPE")
+	if userOsType != "" {
+		return userOsType
+	}
+	return info.OSType
+}
+
+func getPlatformDefaults(info types.Info, osType string) PlatformDefaults {
 	volumesPath := filepath.Join(info.DockerRootDir, "volumes")
 	containersPath := filepath.Join(info.DockerRootDir, "containers")
 
-	switch info.OSType {
+	switch osType {
 	case "linux":
 		return PlatformDefaults{
 			BaseImage:            "scratch",
@@ -71,7 +85,7 @@
 			ContainerStoragePath: filepath.FromSlash(containersPath),
 		}
 	default:
-		panic(fmt.Sprintf("unknown info.OSType for daemon: %s", info.OSType))
+		panic(fmt.Sprintf("unknown OSType for daemon: %s", osType))
 	}
 }
 
@@ -115,3 +129,15 @@
 func (e *Execution) APIClient() client.APIClient {
 	return e.client
 }
+
+// EnsureFrozenImagesLinux loads frozen test images into the daemon
+// if they aren't already loaded
+func EnsureFrozenImagesLinux(testEnv *Execution) error {
+	if testEnv.OSType == "linux" {
+		err := load.FrozenImagesLinux(testEnv.APIClient(), frozenImages...)
+		if err != nil {
+			return errors.Wrap(err, "error loading frozen images")
+		}
+	}
+	return nil
+}
diff --git a/internal/test/environment/protect.go b/internal/test/environment/protect.go
index 5821863..7885f80 100644
--- a/internal/test/environment/protect.go
+++ b/internal/test/environment/protect.go
@@ -5,12 +5,70 @@
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
-	"github.com/docker/docker/integration-cli/fixtures/load"
+	dclient "github.com/docker/docker/client"
 	"github.com/stretchr/testify/require"
 )
 
+var frozenImages = []string{"busybox:latest", "hello-world:frozen", "debian:jessie"}
+
 type protectedElements struct {
-	images map[string]struct{}
+	containers map[string]struct{}
+	images     map[string]struct{}
+	networks   map[string]struct{}
+	plugins    map[string]struct{}
+	volumes    map[string]struct{}
+}
+
+func newProtectedElements() protectedElements {
+	return protectedElements{
+		containers: map[string]struct{}{},
+		images:     map[string]struct{}{},
+		networks:   map[string]struct{}{},
+		plugins:    map[string]struct{}{},
+		volumes:    map[string]struct{}{},
+	}
+}
+
+// ProtectAll protects the existing environment (containers, images, networks,
+// volumes, and, on Linux, plugins) from being cleaned up at the end of test
+// runs
+func ProtectAll(t testingT, testEnv *Execution) {
+	ProtectContainers(t, testEnv)
+	ProtectImages(t, testEnv)
+	ProtectNetworks(t, testEnv)
+	ProtectVolumes(t, testEnv)
+	if testEnv.OSType == "linux" {
+		ProtectPlugins(t, testEnv)
+	}
+}
+
+// ProtectContainer adds the specified container(s) to be protected in case of
+// clean
+func (e *Execution) ProtectContainer(t testingT, containers ...string) {
+	for _, container := range containers {
+		e.protectedElements.containers[container] = struct{}{}
+	}
+}
+
+// ProtectContainers protects existing containers from being cleaned up at the
+// end of test runs
+func ProtectContainers(t testingT, testEnv *Execution) {
+	containers := getExistingContainers(t, testEnv)
+	testEnv.ProtectContainer(t, containers...)
+}
+
+func getExistingContainers(t require.TestingT, testEnv *Execution) []string {
+	client := testEnv.APIClient()
+	containerList, err := client.ContainerList(context.Background(), types.ContainerListOptions{
+		All: true,
+	})
+	require.NoError(t, err, "failed to list containers")
+
+	containers := []string{}
+	for _, container := range containerList {
+		containers = append(containers, container.ID)
+	}
+	return containers
 }
 
 // ProtectImage adds the specified image(s) to be protected in case of clean
@@ -20,19 +78,13 @@
 	}
 }
 
-func newProtectedElements() protectedElements {
-	return protectedElements{
-		images: map[string]struct{}{},
-	}
-}
-
 // ProtectImages protects existing images and on linux frozen images from being
 // cleaned up at the end of test runs
 func ProtectImages(t testingT, testEnv *Execution) {
 	images := getExistingImages(t, testEnv)
 
-	if testEnv.DaemonInfo.OSType == "linux" {
-		images = append(images, ensureFrozenImagesLinux(t, testEnv)...)
+	if testEnv.OSType == "linux" {
+		images = append(images, frozenImages...)
 	}
 	testEnv.ProtectImage(t, images...)
 }
@@ -42,6 +94,7 @@
 	filter := filters.NewArgs()
 	filter.Add("dangling", "false")
 	imageList, err := client.ImageList(context.Background(), types.ImageListOptions{
+		All:     true,
 		Filters: filter,
 	})
 	require.NoError(t, err, "failed to list images")
@@ -68,11 +121,85 @@
 	return result
 }
 
-func ensureFrozenImagesLinux(t testingT, testEnv *Execution) []string {
-	images := []string{"busybox:latest", "hello-world:frozen", "debian:jessie"}
-	err := load.FrozenImagesLinux(testEnv.APIClient(), images...)
-	if err != nil {
-		t.Fatalf("Failed to load frozen images: %s", err)
+// ProtectNetwork adds the specified network(s) to the protected list so
+// they are not removed during test cleanup
+func (e *Execution) ProtectNetwork(t testingT, networks ...string) {
+	for _, network := range networks {
+		e.protectedElements.networks[network] = struct{}{}
 	}
-	return images
+}
+
+// ProtectNetworks protects existing networks from being cleaned up at the end
+// of test runs
+func ProtectNetworks(t testingT, testEnv *Execution) {
+	networks := getExistingNetworks(t, testEnv)
+	testEnv.ProtectNetwork(t, networks...)
+}
+
+func getExistingNetworks(t require.TestingT, testEnv *Execution) []string {
+	client := testEnv.APIClient()
+	networkList, err := client.NetworkList(context.Background(), types.NetworkListOptions{})
+	require.NoError(t, err, "failed to list networks")
+
+	networks := []string{}
+	for _, network := range networkList {
+		networks = append(networks, network.ID)
+	}
+	return networks
+}
+
+// ProtectPlugin adds the specified plugin(s) to the protected list so they are not removed during test cleanup
+func (e *Execution) ProtectPlugin(t testingT, plugins ...string) {
+	for _, plugin := range plugins {
+		e.protectedElements.plugins[plugin] = struct{}{}
+	}
+}
+
+// ProtectPlugins protects existing plugins from being cleaned up at the end of
+// test runs
+func ProtectPlugins(t testingT, testEnv *Execution) {
+	plugins := getExistingPlugins(t, testEnv)
+	testEnv.ProtectPlugin(t, plugins...)
+}
+
+func getExistingPlugins(t require.TestingT, testEnv *Execution) []string {
+	client := testEnv.APIClient()
+	pluginList, err := client.PluginList(context.Background(), filters.Args{})
+	// Docker EE does not allow cluster-wide plugin management.
+	if dclient.IsErrNotImplemented(err) {
+		return []string{}
+	}
+	require.NoError(t, err, "failed to list plugins")
+
+	plugins := []string{}
+	for _, plugin := range pluginList {
+		plugins = append(plugins, plugin.Name)
+	}
+	return plugins
+}
+
+// ProtectVolume adds the specified volume(s) to the protected list so they are not removed during test cleanup
+func (e *Execution) ProtectVolume(t testingT, volumes ...string) {
+	for _, volume := range volumes {
+		e.protectedElements.volumes[volume] = struct{}{}
+	}
+}
+
+// ProtectVolumes protects existing volumes from being cleaned up at the end of
+// test runs
+func ProtectVolumes(t testingT, testEnv *Execution) {
+	volumes := getExistingVolumes(t, testEnv)
+	testEnv.ProtectVolume(t, volumes...)
+}
+
+func getExistingVolumes(t require.TestingT, testEnv *Execution) []string {
+	client := testEnv.APIClient()
+	volumeList, err := client.VolumeList(context.Background(), filters.Args{})
+	require.NoError(t, err, "failed to list volumes")
+
+	volumes := []string{}
+	for _, volume := range volumeList.Volumes {
+		volumes = append(volumes, volume.Name)
+	}
+	return volumes
 }
diff --git a/internal/testutil/helpers.go b/internal/testutil/helpers.go
index a760569..287b3cb 100644
--- a/internal/testutil/helpers.go
+++ b/internal/testutil/helpers.go
@@ -1,6 +1,8 @@
 package testutil
 
 import (
+	"io"
+
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -11,3 +13,15 @@
 	require.Error(t, err, msgAndArgs...)
 	assert.Contains(t, err.Error(), expectedError, msgAndArgs...)
 }
+
+// DevZero acts like /dev/zero but in an OS-independent fashion.
+var DevZero io.Reader = devZero{}
+
+type devZero struct{}
+
+func (d devZero) Read(p []byte) (n int, err error) {
+	for i := 0; i < len(p); i++ {
+		p[i] = '\x00'
+	}
+	return len(p), nil
+}
diff --git a/internal/testutil/stringutils.go b/internal/testutil/stringutils.go
new file mode 100644
index 0000000..76cf8d8
--- /dev/null
+++ b/internal/testutil/stringutils.go
@@ -0,0 +1,14 @@
+package testutil
+
+import "math/rand"
+
+// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n.
+func GenerateRandomAlphaOnlyString(n int) string {
+	// alphabet of candidate characters: upper- and lower-case ASCII letters
+	letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
+	b := make([]byte, n)
+	for i := range b {
+		b[i] = letters[rand.Intn(len(letters))]
+	}
+	return string(b)
+}
diff --git a/internal/testutil/stringutils_test.go b/internal/testutil/stringutils_test.go
new file mode 100644
index 0000000..2985a06
--- /dev/null
+++ b/internal/testutil/stringutils_test.go
@@ -0,0 +1,33 @@
+package testutil
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func testLengthHelper(generator func(int) string, t *testing.T) {
+	expectedLength := 20
+	s := generator(expectedLength)
+	assert.Equal(t, expectedLength, len(s))
+}
+
+func testUniquenessHelper(generator func(int) string, t *testing.T) {
+	repeats := 25
+	set := make(map[string]struct{}, repeats)
+	for i := 0; i < repeats; i = i + 1 {
+		str := generator(64)
+		assert.Equal(t, 64, len(str))
+		_, ok := set[str]
+		assert.False(t, ok, "Random number is repeated")
+		set[str] = struct{}{}
+	}
+}
+
+func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) {
+	testLengthHelper(GenerateRandomAlphaOnlyString, t)
+}
+
+func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) {
+	testUniquenessHelper(GenerateRandomAlphaOnlyString, t)
+}
diff --git a/layer/empty.go b/layer/empty.go
index cf04aa1..7924a56 100644
--- a/layer/empty.go
+++ b/layer/empty.go
@@ -55,7 +55,7 @@
 	return make(map[string]string), nil
 }
 
-func (el *emptyLayer) Platform() Platform {
+func (el *emptyLayer) OS() OS {
 	return ""
 }
 
diff --git a/layer/empty_test.go b/layer/empty_test.go
index 5555dbd..abafc23 100644
--- a/layer/empty_test.go
+++ b/layer/empty_test.go
@@ -28,6 +28,12 @@
 		t.Fatal("expected zero diffsize for empty layer")
 	}
 
+	meta, err := EmptyLayer.Metadata()
+
+	if len(meta) != 0 || err != nil {
+		t.Fatal("expected zero length metadata for empty layer")
+	}
+
 	tarStream, err := EmptyLayer.TarStream()
 	if err != nil {
 		t.Fatalf("error streaming tar for empty layer: %v", err)
diff --git a/layer/filestore_unix.go b/layer/filestore_unix.go
index fe8a4f8..35e282e 100644
--- a/layer/filestore_unix.go
+++ b/layer/filestore_unix.go
@@ -2,12 +2,12 @@
 
 package layer
 
-// SetPlatform writes the "platform" file to the layer filestore
-func (fm *fileMetadataTransaction) SetPlatform(platform Platform) error {
+// SetOS writes the "os" file to the layer filestore
+func (fm *fileMetadataTransaction) SetOS(os OS) error {
 	return nil
 }
 
-// GetPlatform reads the "platform" file from the layer filestore
-func (fms *fileMetadataStore) GetPlatform(layer ChainID) (Platform, error) {
+// GetOS reads the "os" file from the layer filestore
+func (fms *fileMetadataStore) GetOS(layer ChainID) (OS, error) {
 	return "", nil
 }
diff --git a/layer/filestore_windows.go b/layer/filestore_windows.go
index 066456d..25daa21 100644
--- a/layer/filestore_windows.go
+++ b/layer/filestore_windows.go
@@ -7,19 +7,19 @@
 	"strings"
 )
 
-// SetPlatform writes the "platform" file to the layer filestore
-func (fm *fileMetadataTransaction) SetPlatform(platform Platform) error {
-	if platform == "" {
+// SetOS writes the "os" file to the layer filestore
+func (fm *fileMetadataTransaction) SetOS(os OS) error {
+	if os == "" {
 		return nil
 	}
-	return fm.ws.WriteFile("platform", []byte(platform), 0644)
+	return fm.ws.WriteFile("os", []byte(os), 0644)
 }
 
-// GetPlatform reads the "platform" file from the layer filestore
-func (fms *fileMetadataStore) GetPlatform(layer ChainID) (Platform, error) {
-	contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "platform"))
+// GetOS reads the "os" file from the layer filestore
+func (fms *fileMetadataStore) GetOS(layer ChainID) (OS, error) {
+	contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "os"))
 	if err != nil {
-		// For backwards compatibility, the platform file may not exist. Default to "windows" if missing.
+		// For backwards compatibility, the os file may not exist. Default to "windows" if missing.
 		if os.IsNotExist(err) {
 			return "windows", nil
 		}
@@ -28,8 +28,8 @@
 	content := strings.TrimSpace(string(contentBytes))
 
 	if content != "windows" && content != "linux" {
-		return "", fmt.Errorf("invalid platform value: %s", content)
+		return "", fmt.Errorf("invalid operating system value: %s", content)
 	}
 
-	return Platform(content), nil
+	return OS(content), nil
 }
diff --git a/layer/layer.go b/layer/layer.go
index e269ef8..08fc789 100644
--- a/layer/layer.go
+++ b/layer/layer.go
@@ -15,6 +15,7 @@
 
 	"github.com/docker/distribution"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/opencontainers/go-digest"
 	"github.com/sirupsen/logrus"
 )
@@ -52,8 +53,8 @@
 	ErrMaxDepthExceeded = errors.New("max depth exceeded")
 
 	// ErrNotSupported is used when the action is not supported
-	// on the current platform
-	ErrNotSupported = errors.New("not support on this platform")
+	// on the current host operating system.
+	ErrNotSupported = errors.New("not support on this host operating system")
 )
 
 // ChainID is the content-addressable ID of a layer.
@@ -64,11 +65,11 @@
 	return string(id)
 }
 
-// Platform is the platform of a layer
-type Platform string
+// OS is the operating system of a layer
+type OS string
 
-// String returns a string rendition of layers target platform
-func (id Platform) String() string {
+// String returns a string rendition of layers target operating system
+func (id OS) String() string {
 	return string(id)
 }
 
@@ -107,8 +108,8 @@
 	// Parent returns the next layer in the layer chain.
 	Parent() Layer
 
-	// Platform returns the platform of the layer
-	Platform() Platform
+	// OS returns the operating system of the layer
+	OS() OS
 
 	// Size returns the size of the entire layer chain. The size
 	// is calculated from the total size of all files in the layers.
@@ -137,7 +138,7 @@
 
 	// Mount mounts the RWLayer and returns the filesystem path
 	// the to the writable layer.
-	Mount(mountLabel string) (string, error)
+	Mount(mountLabel string) (containerfs.ContainerFS, error)
 
 	// Unmount unmounts the RWLayer. This should be called
 	// for every mount. If there are multiple mount calls
@@ -178,7 +179,7 @@
 // writable mount. Changes made here will
 // not be included in the Tar stream of the
 // RWLayer.
-type MountInit func(root string) error
+type MountInit func(root containerfs.ContainerFS) error
 
 // CreateRWLayerOpts contains optional arguments to be passed to CreateRWLayer
 type CreateRWLayerOpts struct {
@@ -190,7 +191,7 @@
 // Store represents a backend for managing both
 // read-only and read-write layers.
 type Store interface {
-	Register(io.Reader, ChainID, Platform) (Layer, error)
+	Register(io.Reader, ChainID, OS) (Layer, error)
 	Get(ChainID) (Layer, error)
 	Map() map[ChainID]Layer
 	Release(Layer) ([]Metadata, error)
@@ -208,7 +209,7 @@
 // DescribableStore represents a layer store capable of storing
 // descriptors for layers.
 type DescribableStore interface {
-	RegisterWithDescriptor(io.Reader, ChainID, Platform, distribution.Descriptor) (Layer, error)
+	RegisterWithDescriptor(io.Reader, ChainID, OS, distribution.Descriptor) (Layer, error)
 }
 
 // MetadataTransaction represents functions for setting layer metadata
@@ -219,7 +220,7 @@
 	SetDiffID(DiffID) error
 	SetCacheID(string) error
 	SetDescriptor(distribution.Descriptor) error
-	SetPlatform(Platform) error
+	SetOS(OS) error
 	TarSplitWriter(compressInput bool) (io.WriteCloser, error)
 
 	Commit(ChainID) error
@@ -240,7 +241,7 @@
 	GetDiffID(ChainID) (DiffID, error)
 	GetCacheID(ChainID) (string, error)
 	GetDescriptor(ChainID) (distribution.Descriptor, error)
-	GetPlatform(ChainID) (Platform, error)
+	GetOS(ChainID) (OS, error)
 	TarSplitReader(ChainID) (io.ReadCloser, error)
 
 	SetMountID(string, string) error
diff --git a/layer/layer_store.go b/layer/layer_store.go
index 7283014..27abcde 100644
--- a/layer/layer_store.go
+++ b/layer/layer_store.go
@@ -39,7 +39,7 @@
 
 	useTarSplit bool
 
-	platform string
+	os string
 }
 
 // StoreOptions are the options used to create a new Store instance
@@ -51,7 +51,7 @@
 	IDMappings                *idtools.IDMappings
 	PluginGetter              plugingetter.PluginGetter
 	ExperimentalEnabled       bool
-	Platform                  string
+	OS                        string
 }
 
 // NewStoreFromOptions creates a new Store instance
@@ -73,13 +73,13 @@
 		return nil, err
 	}
 
-	return NewStoreFromGraphDriver(fms, driver, options.Platform)
+	return NewStoreFromGraphDriver(fms, driver, options.OS)
 }
 
 // NewStoreFromGraphDriver creates a new Store instance using the provided
 // metadata store and graph driver. The metadata store will be used to restore
 // the Store.
-func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver, platform string) (Store, error) {
+func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver, os string) (Store, error) {
 	caps := graphdriver.Capabilities{}
 	if capDriver, ok := driver.(graphdriver.CapabilityDriver); ok {
 		caps = capDriver.Capabilities()
@@ -91,7 +91,7 @@
 		layerMap:    map[ChainID]*roLayer{},
 		mounts:      map[string]*mountedLayer{},
 		useTarSplit: !caps.ReproducesExactDiffs,
-		platform:    platform,
+		os:          os,
 	}
 
 	ids, mounts, err := store.List()
@@ -150,9 +150,9 @@
 		return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err)
 	}
 
-	platform, err := ls.store.GetPlatform(layer)
+	os, err := ls.store.GetOS(layer)
 	if err != nil {
-		return nil, fmt.Errorf("failed to get platform for %s: %s", layer, err)
+		return nil, fmt.Errorf("failed to get operating system for %s: %s", layer, err)
 	}
 
 	cl = &roLayer{
@@ -163,7 +163,7 @@
 		layerStore: ls,
 		references: map[Layer]struct{}{},
 		descriptor: descriptor,
-		platform:   platform,
+		os:         os,
 	}
 
 	if parent != "" {
@@ -259,11 +259,11 @@
 	return nil
 }
 
-func (ls *layerStore) Register(ts io.Reader, parent ChainID, platform Platform) (Layer, error) {
-	return ls.registerWithDescriptor(ts, parent, platform, distribution.Descriptor{})
+func (ls *layerStore) Register(ts io.Reader, parent ChainID, os OS) (Layer, error) {
+	return ls.registerWithDescriptor(ts, parent, os, distribution.Descriptor{})
 }
 
-func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, platform Platform, descriptor distribution.Descriptor) (Layer, error) {
+func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, os OS, descriptor distribution.Descriptor) (Layer, error) {
 	// err is used to hold the error which will always trigger
 	// cleanup of creates sources but may not be an error returned
 	// to the caller (already exists).
@@ -271,10 +271,10 @@
 	var pid string
 	var p *roLayer
 
-	// Integrity check - ensure we are creating something for the correct platform
+	// Integrity check - ensure we are creating something for the correct operating system
 	if system.LCOWSupported() {
-		if strings.ToLower(ls.platform) != strings.ToLower(string(platform)) {
-			return nil, fmt.Errorf("cannot create entry for platform %q in layer store for platform %q", platform, ls.platform)
+		if strings.ToLower(ls.os) != strings.ToLower(string(os)) {
+			return nil, fmt.Errorf("cannot create entry for operating system %q in layer store for operating system %q", os, ls.os)
 		}
 	}
 
@@ -306,7 +306,7 @@
 		layerStore:     ls,
 		references:     map[Layer]struct{}{},
 		descriptor:     descriptor,
-		platform:       platform,
+		os:             os,
 	}
 
 	if err = ls.driver.Create(layer.cacheID, pid, nil); err != nil {
@@ -749,5 +749,5 @@
 	if err != nil {
 		return nil, err
 	}
-	return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil
+	return &fileGetPutter{storage.NewPathFileGetter(p.Path()), n.Driver, id}, nil
 }
diff --git a/layer/layer_store_windows.go b/layer/layer_store_windows.go
index ccbf6dd..8e93119 100644
--- a/layer/layer_store_windows.go
+++ b/layer/layer_store_windows.go
@@ -6,6 +6,6 @@
 	"github.com/docker/distribution"
 )
 
-func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, platform Platform, descriptor distribution.Descriptor) (Layer, error) {
-	return ls.registerWithDescriptor(ts, parent, platform, descriptor)
+func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, os OS, descriptor distribution.Descriptor) (Layer, error) {
+	return ls.registerWithDescriptor(ts, parent, os, descriptor)
 }
diff --git a/layer/layer_test.go b/layer/layer_test.go
index 8ec5b4d..6936fae 100644
--- a/layer/layer_test.go
+++ b/layer/layer_test.go
@@ -10,9 +10,11 @@
 	"strings"
 	"testing"
 
+	"github.com/containerd/continuity/driver"
 	"github.com/docker/docker/daemon/graphdriver"
 	"github.com/docker/docker/daemon/graphdriver/vfs"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/stringid"
 	"github.com/opencontainers/go-digest"
@@ -82,7 +84,7 @@
 	}
 }
 
-type layerInit func(root string) error
+type layerInit func(root containerfs.ContainerFS) error
 
 func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) {
 	containerID := stringid.GenerateRandomID()
@@ -91,12 +93,12 @@
 		return nil, err
 	}
 
-	path, err := mount.Mount("")
+	pathFS, err := mount.Mount("")
 	if err != nil {
 		return nil, err
 	}
 
-	if err := layerFunc(path); err != nil {
+	if err := layerFunc(pathFS); err != nil {
 		return nil, err
 	}
 
@@ -106,7 +108,7 @@
 	}
 	defer ts.Close()
 
-	layer, err := ls.Register(ts, parent, Platform(runtime.GOOS))
+	layer, err := ls.Register(ts, parent, OS(runtime.GOOS))
 	if err != nil {
 		return nil, err
 	}
@@ -123,7 +125,7 @@
 }
 
 type FileApplier interface {
-	ApplyFile(root string) error
+	ApplyFile(root containerfs.ContainerFS) error
 }
 
 type testFile struct {
@@ -140,25 +142,25 @@
 	}
 }
 
-func (tf *testFile) ApplyFile(root string) error {
-	fullPath := filepath.Join(root, tf.name)
-	if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil {
+func (tf *testFile) ApplyFile(root containerfs.ContainerFS) error {
+	fullPath := root.Join(root.Path(), tf.name)
+	if err := root.MkdirAll(root.Dir(fullPath), 0755); err != nil {
 		return err
 	}
 	// Check if already exists
-	if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission {
-		if err := os.Chmod(fullPath, tf.permission); err != nil {
+	if stat, err := root.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission {
+		if err := root.Lchmod(fullPath, tf.permission); err != nil {
 			return err
 		}
 	}
-	if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil {
+	if err := driver.WriteFile(root, fullPath, tf.content, tf.permission); err != nil {
 		return err
 	}
 	return nil
 }
 
 func initWithFiles(files ...FileApplier) layerInit {
-	return func(root string) error {
+	return func(root containerfs.ContainerFS) error {
 		for _, f := range files {
 			if err := f.ApplyFile(root); err != nil {
 				return err
@@ -288,7 +290,7 @@
 		t.Fatal(err)
 	}
 
-	b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt"))
+	b, err := driver.ReadFile(path2, path2.Join(path2.Path(), "testfile.txt"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -391,12 +393,12 @@
 		t.Fatal(err)
 	}
 
-	path, err := m.Mount("")
+	pathFS, err := m.Mount("")
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil {
+	if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile.txt"), []byte("nothing here"), 0644); err != nil {
 		t.Fatal(err)
 	}
 
@@ -430,20 +432,20 @@
 
 	if mountPath, err := m2.Mount(""); err != nil {
 		t.Fatal(err)
-	} else if path != mountPath {
-		t.Fatalf("Unexpected path %s, expected %s", mountPath, path)
+	} else if pathFS.Path() != mountPath.Path() {
+		t.Fatalf("Unexpected path %s, expected %s", mountPath.Path(), pathFS.Path())
 	}
 
 	if mountPath, err := m2.Mount(""); err != nil {
 		t.Fatal(err)
-	} else if path != mountPath {
-		t.Fatalf("Unexpected path %s, expected %s", mountPath, path)
+	} else if pathFS.Path() != mountPath.Path() {
+		t.Fatalf("Unexpected path %s, expected %s", mountPath.Path(), pathFS.Path())
 	}
 	if err := m2.Unmount(); err != nil {
 		t.Fatal(err)
 	}
 
-	b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt"))
+	b, err := driver.ReadFile(pathFS, pathFS.Join(pathFS.Path(), "testfile.txt"))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -499,7 +501,7 @@
 		t.Fatal(err)
 	}
 
-	layer1, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS))
+	layer1, err := ls.Register(bytes.NewReader(tar1), "", OS(runtime.GOOS))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -518,7 +520,7 @@
 		t.Fatal(err)
 	}
 
-	layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID(), Platform(runtime.GOOS))
+	layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID(), OS(runtime.GOOS))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -618,7 +620,7 @@
 	defer os.RemoveAll(td)
 
 	for _, f := range files {
-		if err := f.ApplyFile(td); err != nil {
+		if err := f.ApplyFile(containerfs.NewLocalContainerFS(td)); err != nil {
 			return nil, err
 		}
 	}
@@ -686,12 +688,12 @@
 		t.Fatal(err)
 	}
 
-	layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), Platform(runtime.GOOS))
+	layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), OS(runtime.GOOS))
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), Platform(runtime.GOOS))
+	layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID(), OS(runtime.GOOS))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -726,12 +728,12 @@
 		t.Fatal(err)
 	}
 
-	layer1, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS))
+	layer1, err := ls.Register(bytes.NewReader(tar1), "", OS(runtime.GOOS))
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	layer2, err := ls.Register(bytes.NewReader(tar2), "", Platform(runtime.GOOS))
+	layer2, err := ls.Register(bytes.NewReader(tar2), "", OS(runtime.GOOS))
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/layer/layer_unix.go b/layer/layer_unix.go
index 776b78a..d77e2fc 100644
--- a/layer/layer_unix.go
+++ b/layer/layer_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd darwin openbsd solaris
+// +build linux freebsd darwin openbsd
 
 package layer
 
diff --git a/layer/layer_windows.go b/layer/layer_windows.go
index a1c1953..d02d4d0 100644
--- a/layer/layer_windows.go
+++ b/layer/layer_windows.go
@@ -1,6 +1,15 @@
 package layer
 
-import "errors"
+import (
+	"errors"
+)
+
+// Getter is an interface to get the path to a layer on the host.
+type Getter interface {
+	// GetLayerPath gets the path for the layer. This is different from Get()
+	// since that returns an interface to account for umountable layers.
+	GetLayerPath(id string) (string, error)
+}
 
 // GetLayerPath returns the path to a layer
 func GetLayerPath(s Store, layer ChainID) (string, error) {
@@ -16,6 +25,10 @@
 		return "", ErrLayerDoesNotExist
 	}
 
+	if layerGetter, ok := ls.driver.(Getter); ok {
+		return layerGetter.GetLayerPath(rl.cacheID)
+	}
+
 	path, err := ls.driver.Get(rl.cacheID, "")
 	if err != nil {
 		return "", err
@@ -25,7 +38,7 @@
 		return "", err
 	}
 
-	return path, nil
+	return path.Path(), nil
 }
 
 func (ls *layerStore) mountID(name string) string {
diff --git a/layer/migration_test.go b/layer/migration_test.go
index 7364e6c..cc63a01 100644
--- a/layer/migration_test.go
+++ b/layer/migration_test.go
@@ -110,14 +110,14 @@
 		t.Fatal(err)
 	}
 
-	layer1b, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS))
+	layer1b, err := ls.Register(bytes.NewReader(tar1), "", OS(runtime.GOOS))
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	assertReferences(t, layer1a, layer1b)
 	// Attempt register, should be same
-	layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), Platform(runtime.GOOS))
+	layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), OS(runtime.GOOS))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -238,7 +238,7 @@
 		t.Fatal(err)
 	}
 
-	layer1b, err := ls.Register(bytes.NewReader(tar1), "", Platform(runtime.GOOS))
+	layer1b, err := ls.Register(bytes.NewReader(tar1), "", OS(runtime.GOOS))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -246,7 +246,7 @@
 	assertReferences(t, layer1a, layer1b)
 
 	// Attempt register, should be same
-	layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), Platform(runtime.GOOS))
+	layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID(), OS(runtime.GOOS))
 	if err != nil {
 		t.Fatal(err)
 	}
diff --git a/layer/mount_test.go b/layer/mount_test.go
index f5799e7..44d461f 100644
--- a/layer/mount_test.go
+++ b/layer/mount_test.go
@@ -2,13 +2,13 @@
 
 import (
 	"io/ioutil"
-	"os"
-	"path/filepath"
 	"runtime"
 	"sort"
 	"testing"
 
+	"github.com/containerd/continuity/driver"
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
 )
 
 func TestMountInit(t *testing.T) {
@@ -28,7 +28,7 @@
 		t.Fatal(err)
 	}
 
-	mountInit := func(root string) error {
+	mountInit := func(root containerfs.ContainerFS) error {
 		return initfile.ApplyFile(root)
 	}
 
@@ -40,22 +40,22 @@
 		t.Fatal(err)
 	}
 
-	path, err := m.Mount("")
+	pathFS, err := m.Mount("")
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	f, err := os.Open(filepath.Join(path, "testfile.txt"))
+	fi, err := pathFS.Stat(pathFS.Join(pathFS.Path(), "testfile.txt"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	f, err := pathFS.Open(pathFS.Join(pathFS.Path(), "testfile.txt"))
 	if err != nil {
 		t.Fatal(err)
 	}
 	defer f.Close()
 
-	fi, err := f.Stat()
-	if err != nil {
-		t.Fatal(err)
-	}
-
 	b, err := ioutil.ReadAll(f)
 	if err != nil {
 		t.Fatal(err)
@@ -88,7 +88,7 @@
 		t.Fatal(err)
 	}
 
-	mountInit := func(root string) error {
+	mountInit := func(root containerfs.ContainerFS) error {
 		return newTestFile("file-init", contentInit, 0777).ApplyFile(root)
 	}
 	rwLayerOpts := &CreateRWLayerOpts{
@@ -100,12 +100,12 @@
 		t.Fatal(err)
 	}
 
-	path, err := m.Mount("")
+	pathFS, err := m.Mount("")
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil {
+	if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "file2"), content2, 0755); err != nil {
 		t.Fatal(err)
 	}
 
@@ -140,7 +140,7 @@
 		t.Fatal(err)
 	}
 
-	mountInit := func(root string) error {
+	mountInit := func(root containerfs.ContainerFS) error {
 		return initfile.ApplyFile(root)
 	}
 	rwLayerOpts := &CreateRWLayerOpts{
@@ -152,28 +152,28 @@
 		t.Fatal(err)
 	}
 
-	path, err := m.Mount("")
+	pathFS, err := m.Mount("")
 	if err != nil {
 		t.Fatal(err)
 	}
 
-	if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil {
+	if err := pathFS.Lchmod(pathFS.Join(pathFS.Path(), "testfile1.txt"), 0755); err != nil {
 		t.Fatal(err)
 	}
 
-	if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil {
+	if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile1.txt"), []byte("mount data!"), 0755); err != nil {
 		t.Fatal(err)
 	}
 
-	if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil {
+	if err := pathFS.Remove(pathFS.Join(pathFS.Path(), "testfile2.txt")); err != nil {
 		t.Fatal(err)
 	}
 
-	if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil {
+	if err := pathFS.Lchmod(pathFS.Join(pathFS.Path(), "testfile3.txt"), 0755); err != nil {
 		t.Fatal(err)
 	}
 
-	if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil {
+	if err := driver.WriteFile(pathFS, pathFS.Join(pathFS.Path(), "testfile4.txt"), []byte("mount data!"), 0644); err != nil {
 		t.Fatal(err)
 	}
 
diff --git a/layer/mounted_layer.go b/layer/mounted_layer.go
index a5cfcfa..47ef966 100644
--- a/layer/mounted_layer.go
+++ b/layer/mounted_layer.go
@@ -4,6 +4,7 @@
 	"io"
 
 	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/containerfs"
 )
 
 type mountedLayer struct {
@@ -88,7 +89,7 @@
 	*mountedLayer
 }
 
-func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) {
+func (rl *referencedRWLayer) Mount(mountLabel string) (containerfs.ContainerFS, error) {
 	return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel)
 }
 
diff --git a/layer/ro_layer.go b/layer/ro_layer.go
index e03d78b..f634194 100644
--- a/layer/ro_layer.go
+++ b/layer/ro_layer.go
@@ -16,7 +16,7 @@
 	size       int64
 	layerStore *layerStore
 	descriptor distribution.Descriptor
-	platform   Platform
+	os         OS
 
 	referenceCount int
 	references     map[Layer]struct{}
@@ -143,7 +143,7 @@
 			return err
 		}
 	}
-	if err := tx.SetPlatform(layer.platform); err != nil {
+	if err := tx.SetOS(layer.os); err != nil {
 		return err
 	}
 
diff --git a/layer/ro_layer_unix.go b/layer/ro_layer_unix.go
index 1b36856..f0c2003 100644
--- a/layer/ro_layer_unix.go
+++ b/layer/ro_layer_unix.go
@@ -2,6 +2,6 @@
 
 package layer
 
-func (rl *roLayer) Platform() Platform {
+func (rl *roLayer) OS() OS {
 	return ""
 }
diff --git a/layer/ro_layer_windows.go b/layer/ro_layer_windows.go
index 6679bdf..1210f10 100644
--- a/layer/ro_layer_windows.go
+++ b/layer/ro_layer_windows.go
@@ -8,9 +8,9 @@
 	return rl.descriptor
 }
 
-func (rl *roLayer) Platform() Platform {
-	if rl.platform == "" {
+func (rl *roLayer) OS() OS {
+	if rl.os == "" {
 		return "windows"
 	}
-	return rl.platform
+	return rl.os
 }
diff --git a/libcontainerd/client.go b/libcontainerd/client.go
deleted file mode 100644
index c9004b8..0000000
--- a/libcontainerd/client.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package libcontainerd
-
-import (
-	"fmt"
-	"sync"
-
-	"github.com/docker/docker/pkg/locker"
-)
-
-// clientCommon contains the platform agnostic fields used in the client structure
-type clientCommon struct {
-	backend    Backend
-	containers map[string]*container
-	locker     *locker.Locker
-	mapMutex   sync.RWMutex // protects read/write operations from containers map
-}
-
-func (clnt *client) lock(containerID string) {
-	clnt.locker.Lock(containerID)
-}
-
-func (clnt *client) unlock(containerID string) {
-	clnt.locker.Unlock(containerID)
-}
-
-// must hold a lock for cont.containerID
-func (clnt *client) appendContainer(cont *container) {
-	clnt.mapMutex.Lock()
-	clnt.containers[cont.containerID] = cont
-	clnt.mapMutex.Unlock()
-}
-func (clnt *client) deleteContainer(containerID string) {
-	clnt.mapMutex.Lock()
-	delete(clnt.containers, containerID)
-	clnt.mapMutex.Unlock()
-}
-
-func (clnt *client) getContainer(containerID string) (*container, error) {
-	clnt.mapMutex.RLock()
-	container, ok := clnt.containers[containerID]
-	defer clnt.mapMutex.RUnlock()
-	if !ok {
-		return nil, fmt.Errorf("invalid container: %s", containerID) // fixme: typed error
-	}
-	return container, nil
-}
diff --git a/libcontainerd/client_daemon.go b/libcontainerd/client_daemon.go
new file mode 100644
index 0000000..b0cdcfc
--- /dev/null
+++ b/libcontainerd/client_daemon.go
@@ -0,0 +1,806 @@
+// +build !windows
+
+package libcontainerd
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"google.golang.org/grpc"
+
+	"github.com/containerd/containerd"
+	eventsapi "github.com/containerd/containerd/api/services/events/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/archive"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/linux/runcopts"
+	"github.com/containerd/typeurl"
+	"github.com/docker/docker/pkg/ioutils"
+	"github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// InitProcessName is the name given to the first process of a
+// container
+const InitProcessName = "init"
+
+type container struct {
+	sync.Mutex
+
+	bundleDir string
+	ctr       containerd.Container
+	task      containerd.Task
+	execs     map[string]containerd.Process
+	oomKilled bool
+}
+
+type client struct {
+	sync.RWMutex // protects containers map
+
+	remote   *containerd.Client
+	stateDir string
+	logger   *logrus.Entry
+
+	namespace  string
+	backend    Backend
+	eventQ     queue
+	containers map[string]*container
+}
+
+func (c *client) Version(ctx context.Context) (containerd.Version, error) {
+	return c.remote.Version(ctx)
+}
+
+func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallback) (alive bool, pid int, err error) {
+	c.Lock()
+	defer c.Unlock()
+
+	var cio containerd.IO
+	defer func() {
+		err = wrapError(err)
+	}()
+
+	ctr, err := c.remote.LoadContainer(ctx, id)
+	if err != nil {
+		return false, -1, errors.WithStack(err)
+	}
+
+	defer func() {
+		if err != nil && cio != nil {
+			cio.Cancel()
+			cio.Close()
+		}
+	}()
+
+	t, err := ctr.Task(ctx, func(fifos *containerd.FIFOSet) (containerd.IO, error) {
+		io, err := newIOPipe(fifos)
+		if err != nil {
+			return nil, err
+		}
+
+		cio, err = attachStdio(io)
+		return cio, err
+	})
+	if err != nil && !strings.Contains(err.Error(), "no running task found") {
+		return false, -1, err
+	}
+
+	if t != nil {
+		s, err := t.Status(ctx)
+		if err != nil {
+			return false, -1, err
+		}
+
+		alive = s.Status != containerd.Stopped
+		pid = int(t.Pid())
+	}
+	c.containers[id] = &container{
+		bundleDir: filepath.Join(c.stateDir, id),
+		ctr:       ctr,
+		task:      t,
+		// TODO(mlaventure): load execs
+	}
+
+	c.logger.WithFields(logrus.Fields{
+		"container": id,
+		"alive":     alive,
+		"pid":       pid,
+	}).Debug("restored container")
+
+	return alive, pid, nil
+}
+
+func (c *client) Create(ctx context.Context, id string, ociSpec *specs.Spec, runtimeOptions interface{}) error {
+	if ctr := c.getContainer(id); ctr != nil {
+		return errors.WithStack(newConflictError("id already in use"))
+	}
+
+	bdir, err := prepareBundleDir(filepath.Join(c.stateDir, id), ociSpec)
+	if err != nil {
+		return wrapSystemError(errors.Wrap(err, "prepare bundle dir failed"))
+	}
+
+	c.logger.WithField("bundle", bdir).WithField("root", ociSpec.Root.Path).Debug("bundle dir created")
+
+	cdCtr, err := c.remote.NewContainer(ctx, id,
+		containerd.WithSpec(ociSpec),
+		// TODO(mlaventure): when containerd supports lcow, revisit runtime value
+		containerd.WithRuntime(fmt.Sprintf("io.containerd.runtime.v1.%s", runtime.GOOS), runtimeOptions))
+	if err != nil {
+		return err
+	}
+
+	c.Lock()
+	c.containers[id] = &container{
+		bundleDir: bdir,
+		ctr:       cdCtr,
+	}
+	c.Unlock()
+
+	return nil
+}
+
+// Start creates and starts a task for the specified containerd id
+func (c *client) Start(ctx context.Context, id, checkpointDir string, withStdin bool, attachStdio StdioCallback) (int, error) {
+	ctr := c.getContainer(id)
+	switch {
+	case ctr == nil:
+		return -1, errors.WithStack(newNotFoundError("no such container"))
+	case ctr.task != nil:
+		return -1, errors.WithStack(newConflictError("container already started"))
+	}
+
+	var (
+		cp             *types.Descriptor
+		t              containerd.Task
+		cio            containerd.IO
+		err            error
+		stdinCloseSync = make(chan struct{})
+	)
+
+	if checkpointDir != "" {
+		// write checkpoint to the content store
+		tar := archive.Diff(ctx, "", checkpointDir)
+		cp, err = c.writeContent(ctx, images.MediaTypeContainerd1Checkpoint, checkpointDir, tar)
+		// remove the checkpoint when we're done
+		defer func() {
+			if cp != nil {
+				err := c.remote.ContentStore().Delete(context.Background(), cp.Digest)
+				if err != nil {
+					c.logger.WithError(err).WithFields(logrus.Fields{
+						"ref":    checkpointDir,
+						"digest": cp.Digest,
+					}).Warnf("failed to delete temporary checkpoint entry")
+				}
+			}
+		}()
+		if err := tar.Close(); err != nil {
+			return -1, errors.Wrap(err, "failed to close checkpoint tar stream")
+		}
+		if err != nil {
+			return -1, errors.Wrapf(err, "failed to upload checkpoint to containerd")
+		}
+	}
+
+	spec, err := ctr.ctr.Spec(ctx)
+	if err != nil {
+		return -1, errors.Wrap(err, "failed to retrieve spec")
+	}
+	uid, gid := getSpecUser(spec)
+	t, err = ctr.ctr.NewTask(ctx,
+		func(id string) (containerd.IO, error) {
+			cio, err = c.createIO(ctr.bundleDir, id, InitProcessName, stdinCloseSync, withStdin, spec.Process.Terminal, attachStdio)
+			return cio, err
+		},
+		func(_ context.Context, _ *containerd.Client, info *containerd.TaskInfo) error {
+			info.Checkpoint = cp
+			info.Options = &runcopts.CreateOptions{
+				IoUid: uint32(uid),
+				IoGid: uint32(gid),
+			}
+			return nil
+		})
+	if err != nil {
+		close(stdinCloseSync)
+		if cio != nil {
+			cio.Cancel()
+			cio.Close()
+		}
+		return -1, err
+	}
+
+	c.Lock()
+	c.containers[id].task = t
+	c.Unlock()
+
+	// Signal c.createIO that it can call CloseIO
+	close(stdinCloseSync)
+
+	if err := t.Start(ctx); err != nil {
+		if _, err := t.Delete(ctx); err != nil {
+			c.logger.WithError(err).WithField("container", id).
+				Error("failed to delete task after fail start")
+		}
+		c.Lock()
+		c.containers[id].task = nil
+		c.Unlock()
+		return -1, err
+	}
+
+	return int(t.Pid()), nil
+}
+
+func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) {
+	ctr := c.getContainer(containerID)
+	switch {
+	case ctr == nil:
+		return -1, errors.WithStack(newNotFoundError("no such container"))
+	case ctr.task == nil:
+		return -1, errors.WithStack(newInvalidParameterError("container is not running"))
+	case ctr.execs != nil && ctr.execs[processID] != nil:
+		return -1, errors.WithStack(newConflictError("id already in use"))
+	}
+
+	var (
+		p              containerd.Process
+		cio            containerd.IO
+		err            error
+		stdinCloseSync = make(chan struct{})
+	)
+	defer func() {
+		if err != nil {
+			if cio != nil {
+				cio.Cancel()
+				cio.Close()
+			}
+		}
+	}()
+
+	p, err = ctr.task.Exec(ctx, processID, spec, func(id string) (containerd.IO, error) {
+		cio, err = c.createIO(ctr.bundleDir, containerID, processID, stdinCloseSync, withStdin, spec.Terminal, attachStdio)
+		return cio, err
+	})
+	if err != nil {
+		close(stdinCloseSync)
+		if cio != nil {
+			cio.Cancel()
+			cio.Close()
+		}
+		return -1, err
+	}
+
+	ctr.Lock()
+	if ctr.execs == nil {
+		ctr.execs = make(map[string]containerd.Process)
+	}
+	ctr.execs[processID] = p
+	ctr.Unlock()
+
+	// Signal c.createIO that it can call CloseIO
+	close(stdinCloseSync)
+
+	if err = p.Start(ctx); err != nil {
+		p.Delete(context.Background())
+		ctr.Lock()
+		delete(ctr.execs, processID)
+		ctr.Unlock()
+		return -1, err
+	}
+
+	return int(p.Pid()), nil
+}
+
+func (c *client) SignalProcess(ctx context.Context, containerID, processID string, signal int) error {
+	p, err := c.getProcess(containerID, processID)
+	if err != nil {
+		return err
+	}
+	return p.Kill(ctx, syscall.Signal(signal))
+}
+
+func (c *client) ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error {
+	p, err := c.getProcess(containerID, processID)
+	if err != nil {
+		return err
+	}
+
+	return p.Resize(ctx, uint32(width), uint32(height))
+}
+
+func (c *client) CloseStdin(ctx context.Context, containerID, processID string) error {
+	p, err := c.getProcess(containerID, processID)
+	if err != nil {
+		return err
+	}
+
+	return p.CloseIO(ctx, containerd.WithStdinCloser)
+}
+
+func (c *client) Pause(ctx context.Context, containerID string) error {
+	p, err := c.getProcess(containerID, InitProcessName)
+	if err != nil {
+		return err
+	}
+
+	return p.(containerd.Task).Pause(ctx)
+}
+
+func (c *client) Resume(ctx context.Context, containerID string) error {
+	p, err := c.getProcess(containerID, InitProcessName)
+	if err != nil {
+		return err
+	}
+
+	return p.(containerd.Task).Resume(ctx)
+}
+
+func (c *client) Stats(ctx context.Context, containerID string) (*Stats, error) {
+	p, err := c.getProcess(containerID, InitProcessName)
+	if err != nil {
+		return nil, err
+	}
+
+	m, err := p.(containerd.Task).Metrics(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	v, err := typeurl.UnmarshalAny(m.Data)
+	if err != nil {
+		return nil, err
+	}
+	return interfaceToStats(m.Timestamp, v), nil
+}
+
+func (c *client) ListPids(ctx context.Context, containerID string) ([]uint32, error) {
+	p, err := c.getProcess(containerID, InitProcessName)
+	if err != nil {
+		return nil, err
+	}
+
+	pis, err := p.(containerd.Task).Pids(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	var pids []uint32
+	for _, i := range pis {
+		pids = append(pids, i.Pid)
+	}
+
+	return pids, nil
+}
+
+func (c *client) Summary(ctx context.Context, containerID string) ([]Summary, error) {
+	p, err := c.getProcess(containerID, InitProcessName)
+	if err != nil {
+		return nil, err
+	}
+
+	pis, err := p.(containerd.Task).Pids(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	var infos []Summary
+	for _, pi := range pis {
+		i, err := typeurl.UnmarshalAny(pi.Info)
+		if err != nil {
+			return nil, errors.Wrap(err, "unable to decode process details")
+		}
+		s, err := summaryFromInterface(i)
+		if err != nil {
+			return nil, err
+		}
+		infos = append(infos, *s)
+	}
+
+	return infos, nil
+}
+
+func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
+	p, err := c.getProcess(containerID, InitProcessName)
+	if err != nil {
+		return 255, time.Now(), nil
+	}
+
+	status, err := p.(containerd.Task).Delete(ctx)
+	if err != nil {
+		return 255, time.Now(), nil
+	}
+
+	c.Lock()
+	if ctr, ok := c.containers[containerID]; ok {
+		ctr.task = nil
+	}
+	c.Unlock()
+
+	return status.ExitCode(), status.ExitTime(), nil
+}
+
+func (c *client) Delete(ctx context.Context, containerID string) error {
+	ctr := c.getContainer(containerID)
+	if ctr == nil {
+		return errors.WithStack(newNotFoundError("no such container"))
+	}
+
+	if err := ctr.ctr.Delete(ctx); err != nil {
+		return err
+	}
+
+	if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" {
+		if err := os.RemoveAll(ctr.bundleDir); err != nil {
+			c.logger.WithError(err).WithFields(logrus.Fields{
+				"container": containerID,
+				"bundle":    ctr.bundleDir,
+			}).Error("failed to remove state dir")
+		}
+	}
+
+	c.removeContainer(containerID)
+
+	return nil
+}
+
+func (c *client) Status(ctx context.Context, containerID string) (Status, error) {
+	ctr := c.getContainer(containerID)
+	if ctr == nil {
+		return StatusUnknown, errors.WithStack(newNotFoundError("no such container"))
+	}
+
+	s, err := ctr.task.Status(ctx)
+	if err != nil {
+		return StatusUnknown, err
+	}
+
+	return Status(s.Status), nil
+}
+
+func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
+	p, err := c.getProcess(containerID, InitProcessName)
+	if err != nil {
+		return err
+	}
+
+	img, err := p.(containerd.Task).Checkpoint(ctx)
+	if err != nil {
+		return err
+	}
+	// Whatever happens, delete the checkpoint from containerd
+	defer func() {
+		err := c.remote.ImageService().Delete(context.Background(), img.Name())
+		if err != nil {
+			c.logger.WithError(err).WithField("digest", img.Target().Digest).
+				Warnf("failed to delete checkpoint image")
+		}
+	}()
+
+	b, err := content.ReadBlob(ctx, c.remote.ContentStore(), img.Target().Digest)
+	if err != nil {
+		return wrapSystemError(errors.Wrapf(err, "failed to retrieve checkpoint data"))
+	}
+	var index v1.Index
+	if err := json.Unmarshal(b, &index); err != nil {
+		return wrapSystemError(errors.Wrapf(err, "failed to decode checkpoint data"))
+	}
+
+	var cpDesc *v1.Descriptor
+	for _, m := range index.Manifests {
+		if m.MediaType == images.MediaTypeContainerd1Checkpoint {
+			cpDesc = &m
+			break
+		}
+	}
+	if cpDesc == nil {
+		return wrapSystemError(errors.Wrapf(err, "invalid checkpoint"))
+	}
+
+	rat, err := c.remote.ContentStore().ReaderAt(ctx, cpDesc.Digest)
+	if err != nil {
+		return wrapSystemError(errors.Wrapf(err, "failed to get checkpoint reader"))
+	}
+	defer rat.Close()
+	_, err = archive.Apply(ctx, checkpointDir, content.NewReader(rat))
+	if err != nil {
+		return wrapSystemError(errors.Wrapf(err, "failed to read checkpoint reader"))
+	}
+
+	return err
+}
+
+func (c *client) getContainer(id string) *container {
+	c.RLock()
+	ctr := c.containers[id]
+	c.RUnlock()
+
+	return ctr
+}
+
+func (c *client) removeContainer(id string) {
+	c.Lock()
+	delete(c.containers, id)
+	c.Unlock()
+}
+
+func (c *client) getProcess(containerID, processID string) (containerd.Process, error) {
+	ctr := c.getContainer(containerID)
+	switch {
+	case ctr == nil:
+		return nil, errors.WithStack(newNotFoundError("no such container"))
+	case ctr.task == nil:
+		return nil, errors.WithStack(newNotFoundError("container is not running"))
+	case processID == InitProcessName:
+		return ctr.task, nil
+	default:
+		ctr.Lock()
+		defer ctr.Unlock()
+		if ctr.execs == nil {
+			return nil, errors.WithStack(newNotFoundError("no execs"))
+		}
+	}
+
+	p := ctr.execs[processID]
+	if p == nil {
+		return nil, errors.WithStack(newNotFoundError("no such exec"))
+	}
+
+	return p, nil
+}
+
+// createIO creates the io to be used by a process
+// This needs a pointer to the interface because, at closure time, the process may not have been registered yet
+func (c *client) createIO(bundleDir, containerID, processID string, stdinCloseSync chan struct{}, withStdin, withTerminal bool, attachStdio StdioCallback) (containerd.IO, error) {
+	fifos := newFIFOSet(bundleDir, containerID, processID, withStdin, withTerminal)
+	io, err := newIOPipe(fifos)
+	if err != nil {
+		return nil, err
+	}
+
+	if io.Stdin != nil {
+		var (
+			err       error
+			stdinOnce sync.Once
+		)
+		pipe := io.Stdin
+		io.Stdin = ioutils.NewWriteCloserWrapper(pipe, func() error {
+			stdinOnce.Do(func() {
+				err = pipe.Close()
+				// Do the rest in a new routine to avoid a deadlock if the
+				// Exec/Start call failed.
+				go func() {
+					<-stdinCloseSync
+					p, err := c.getProcess(containerID, processID)
+					if err == nil {
+						err = p.CloseIO(context.Background(), containerd.WithStdinCloser)
+						if err != nil && strings.Contains(err.Error(), "transport is closing") {
+							err = nil
+						}
+					}
+				}()
+			})
+			return err
+		})
+	}
+
+	cio, err := attachStdio(io)
+	if err != nil {
+		io.Cancel()
+		io.Close()
+	}
+	return cio, err
+}
+
+func (c *client) processEvent(ctr *container, et EventType, ei EventInfo) {
+	c.eventQ.append(ei.ContainerID, func() {
+		err := c.backend.ProcessEvent(ei.ContainerID, et, ei)
+		if err != nil {
+			c.logger.WithError(err).WithFields(logrus.Fields{
+				"container":  ei.ContainerID,
+				"event":      et,
+				"event-info": ei,
+			}).Error("failed to process event")
+		}
+
+		if et == EventExit && ei.ProcessID != ei.ContainerID {
+			var p containerd.Process
+			ctr.Lock()
+			if ctr.execs != nil {
+				p = ctr.execs[ei.ProcessID]
+			}
+			ctr.Unlock()
+			if p == nil {
+				c.logger.WithError(errors.New("no such process")).
+					WithFields(logrus.Fields{
+						"container": ei.ContainerID,
+						"process":   ei.ProcessID,
+					}).Error("exit event")
+				return
+			}
+			_, err = p.Delete(context.Background())
+			if err != nil {
+				c.logger.WithError(err).WithFields(logrus.Fields{
+					"container": ei.ContainerID,
+					"process":   ei.ProcessID,
+				}).Warn("failed to delete process")
+			}
+			c.Lock()
+			delete(ctr.execs, ei.ProcessID)
+			c.Unlock()
+		}
+	})
+}
+
+func (c *client) processEventStream(ctx context.Context) {
+	var (
+		err         error
+		eventStream eventsapi.Events_SubscribeClient
+		ev          *eventsapi.Envelope
+		et          EventType
+		ei          EventInfo
+		ctr         *container
+	)
+	defer func() {
+		if err != nil {
+			select {
+			case <-ctx.Done():
+				c.logger.WithError(ctx.Err()).
+					Info("stopping event stream following graceful shutdown")
+			default:
+				go c.processEventStream(ctx)
+			}
+		}
+	}()
+
+	eventStream, err = c.remote.EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{
+		Filters: []string{"namespace==" + c.namespace + ",topic~=/tasks/.+"},
+	}, grpc.FailFast(false))
+	if err != nil {
+		return
+	}
+
+	var oomKilled bool
+	for {
+		ev, err = eventStream.Recv()
+		if err != nil {
+			c.logger.WithError(err).Error("failed to get event")
+			return
+		}
+
+		if ev.Event == nil {
+			c.logger.WithField("event", ev).Warn("invalid event")
+			continue
+		}
+
+		v, err := typeurl.UnmarshalAny(ev.Event)
+		if err != nil {
+			c.logger.WithError(err).WithField("event", ev).Warn("failed to unmarshal event")
+			continue
+		}
+
+		c.logger.WithField("topic", ev.Topic).Debug("event")
+
+		switch t := v.(type) {
+		case *eventsapi.TaskCreate:
+			et = EventCreate
+			ei = EventInfo{
+				ContainerID: t.ContainerID,
+				ProcessID:   t.ContainerID,
+				Pid:         t.Pid,
+			}
+		case *eventsapi.TaskStart:
+			et = EventStart
+			ei = EventInfo{
+				ContainerID: t.ContainerID,
+				ProcessID:   t.ContainerID,
+				Pid:         t.Pid,
+			}
+		case *eventsapi.TaskExit:
+			et = EventExit
+			ei = EventInfo{
+				ContainerID: t.ContainerID,
+				ProcessID:   t.ID,
+				Pid:         t.Pid,
+				ExitCode:    t.ExitStatus,
+				ExitedAt:    t.ExitedAt,
+			}
+		case *eventsapi.TaskOOM:
+			et = EventOOM
+			ei = EventInfo{
+				ContainerID: t.ContainerID,
+				OOMKilled:   true,
+			}
+			oomKilled = true
+		case *eventsapi.TaskExecAdded:
+			et = EventExecAdded
+			ei = EventInfo{
+				ContainerID: t.ContainerID,
+				ProcessID:   t.ExecID,
+			}
+		case *eventsapi.TaskExecStarted:
+			et = EventExecStarted
+			ei = EventInfo{
+				ContainerID: t.ContainerID,
+				ProcessID:   t.ExecID,
+				Pid:         t.Pid,
+			}
+		case *eventsapi.TaskPaused:
+			et = EventPaused
+			ei = EventInfo{
+				ContainerID: t.ContainerID,
+			}
+		case *eventsapi.TaskResumed:
+			et = EventResumed
+			ei = EventInfo{
+				ContainerID: t.ContainerID,
+			}
+		default:
+			c.logger.WithFields(logrus.Fields{
+				"topic": ev.Topic,
+				"type":  reflect.TypeOf(t)},
+			).Info("ignoring event")
+			continue
+		}
+
+		ctr = c.getContainer(ei.ContainerID)
+		if ctr == nil {
+			c.logger.WithField("container", ei.ContainerID).Warn("unknown container")
+			continue
+		}
+
+		if oomKilled {
+			ctr.oomKilled = true
+			oomKilled = false
+		}
+		ei.OOMKilled = ctr.oomKilled
+
+		c.processEvent(ctr, et, ei)
+	}
+}
+
+func (c *client) writeContent(ctx context.Context, mediaType, ref string, r io.Reader) (*types.Descriptor, error) {
+	writer, err := c.remote.ContentStore().Writer(ctx, ref, 0, "")
+	if err != nil {
+		return nil, err
+	}
+	defer writer.Close()
+	size, err := io.Copy(writer, r)
+	if err != nil {
+		return nil, err
+	}
+	labels := map[string]string{
+		"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
+	}
+	if err := writer.Commit(ctx, 0, "", content.WithLabels(labels)); err != nil {
+		return nil, err
+	}
+	return &types.Descriptor{
+		MediaType: mediaType,
+		Digest:    writer.Digest(),
+		Size_:     size,
+	}, nil
+}
+
+func wrapError(err error) error {
+	if err != nil {
+		msg := err.Error()
+		for _, s := range []string{"container does not exist", "not found", "no such container"} {
+			if strings.Contains(msg, s) {
+				return wrapNotFoundError(err)
+			}
+		}
+	}
+	return err
+}
diff --git a/libcontainerd/client_daemon_linux.go b/libcontainerd/client_daemon_linux.go
new file mode 100644
index 0000000..0337195
--- /dev/null
+++ b/libcontainerd/client_daemon_linux.go
@@ -0,0 +1,96 @@
+package libcontainerd
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/containerd/containerd"
+	"github.com/docker/docker/pkg/idtools"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func summaryFromInterface(i interface{}) (*Summary, error) {
+	return &Summary{}, nil
+}
+
+func (c *client) UpdateResources(ctx context.Context, containerID string, resources *Resources) error {
+	p, err := c.getProcess(containerID, InitProcessName)
+	if err != nil {
+		return err
+	}
+
+	// go doesn't like the alias in 1.8, this means this needs to be
+	// platform specific
+	return p.(containerd.Task).Update(ctx, containerd.WithResources((*specs.LinuxResources)(resources)))
+}
+
+func hostIDFromMap(id uint32, mp []specs.LinuxIDMapping) int {
+	for _, m := range mp {
+		if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 {
+			return int(m.HostID + id - m.ContainerID)
+		}
+	}
+	return 0
+}
+
+func getSpecUser(ociSpec *specs.Spec) (int, int) {
+	var (
+		uid int
+		gid int
+	)
+
+	for _, ns := range ociSpec.Linux.Namespaces {
+		if ns.Type == specs.UserNamespace {
+			uid = hostIDFromMap(0, ociSpec.Linux.UIDMappings)
+			gid = hostIDFromMap(0, ociSpec.Linux.GIDMappings)
+			break
+		}
+	}
+
+	return uid, gid
+}
+
+func prepareBundleDir(bundleDir string, ociSpec *specs.Spec) (string, error) {
+	uid, gid := getSpecUser(ociSpec)
+	if uid == 0 && gid == 0 {
+		return bundleDir, idtools.MkdirAllAndChownNew(bundleDir, 0755, idtools.IDPair{0, 0})
+	}
+
+	p := string(filepath.Separator)
+	components := strings.Split(bundleDir, string(filepath.Separator))
+	for _, d := range components[1:] {
+		p = filepath.Join(p, d)
+		fi, err := os.Stat(p)
+		if err != nil && !os.IsNotExist(err) {
+			return "", err
+		}
+		if os.IsNotExist(err) || fi.Mode()&1 == 0 {
+			p = fmt.Sprintf("%s.%d.%d", p, uid, gid)
+			if err := idtools.MkdirAndChown(p, 0700, idtools.IDPair{uid, gid}); err != nil && !os.IsExist(err) {
+				return "", err
+			}
+		}
+	}
+
+	return p, nil
+}
+
+func newFIFOSet(bundleDir, containerID, processID string, withStdin, withTerminal bool) *containerd.FIFOSet {
+	fifos := &containerd.FIFOSet{
+		Terminal: withTerminal,
+		Out:      filepath.Join(bundleDir, processID+"-stdout"),
+	}
+
+	if withStdin {
+		fifos.In = filepath.Join(bundleDir, processID+"-stdin")
+	}
+
+	if !fifos.Terminal {
+		fifos.Err = filepath.Join(bundleDir, processID+"-stderr")
+	}
+
+	return fifos
+}
diff --git a/libcontainerd/client_daemon_windows.go b/libcontainerd/client_daemon_windows.go
new file mode 100644
index 0000000..9bb5d86
--- /dev/null
+++ b/libcontainerd/client_daemon_windows.go
@@ -0,0 +1,53 @@
+package libcontainerd
+
+import (
+	"fmt"
+
+	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/windows/hcsshimtypes"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+func summaryFromInterface(i interface{}) (*Summary, error) {
+	switch pd := i.(type) {
+	case *hcsshimtypes.ProcessDetails:
+		return &Summary{
+			CreateTimestamp:              pd.CreatedAt,
+			ImageName:                    pd.ImageName,
+			KernelTime100ns:              pd.KernelTime_100Ns,
+			MemoryCommitBytes:            pd.MemoryCommitBytes,
+			MemoryWorkingSetPrivateBytes: pd.MemoryWorkingSetPrivateBytes,
+			MemoryWorkingSetSharedBytes:  pd.MemoryWorkingSetSharedBytes,
+			ProcessId:                    pd.ProcessID,
+			UserTime100ns:                pd.UserTime_100Ns,
+		}, nil
+	default:
+		return nil, errors.Errorf("Unknown process details type %T", pd)
+	}
+}
+
+func prepareBundleDir(bundleDir string, ociSpec *specs.Spec) (string, error) {
+	return bundleDir, nil
+}
+
+func pipeName(containerID, processID, name string) string {
+	return fmt.Sprintf(`\\.\pipe\containerd-%s-%s-%s`, containerID, processID, name)
+}
+
+func newFIFOSet(bundleDir, containerID, processID string, withStdin, withTerminal bool) *containerd.FIFOSet {
+	fifos := &containerd.FIFOSet{
+		Terminal: withTerminal,
+		Out:      pipeName(containerID, processID, "stdout"),
+	}
+
+	if withStdin {
+		fifos.In = pipeName(containerID, processID, "stdin")
+	}
+
+	if !fifos.Terminal {
+		fifos.Err = pipeName(containerID, processID, "stderr")
+	}
+
+	return fifos
+}
diff --git a/libcontainerd/client_linux.go b/libcontainerd/client_linux.go
deleted file mode 100644
index 6c3460a..0000000
--- a/libcontainerd/client_linux.go
+++ /dev/null
@@ -1,619 +0,0 @@
-package libcontainerd
-
-import (
-	"fmt"
-	"os"
-	"strings"
-	"sync"
-	"time"
-
-	containerd "github.com/containerd/containerd/api/grpc/types"
-	containerd_runtime_types "github.com/containerd/containerd/runtime"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/mount"
-	"github.com/golang/protobuf/ptypes"
-	"github.com/golang/protobuf/ptypes/timestamp"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/net/context"
-	"golang.org/x/sys/unix"
-)
-
-type client struct {
-	clientCommon
-
-	// Platform specific properties below here.
-	remote        *remote
-	q             queue
-	exitNotifiers map[string]*exitNotifier
-	liveRestore   bool
-}
-
-// GetServerVersion returns the connected server version information
-func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) {
-	resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{})
-	if err != nil {
-		return nil, err
-	}
-
-	sv := &ServerVersion{
-		GetServerVersionResponse: *resp,
-	}
-
-	return sv, nil
-}
-
-// AddProcess is the handler for adding a process to an already running
-// container. It's called through docker exec. It returns the system pid of the
-// exec'd process.
-func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (pid int, err error) {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	container, err := clnt.getContainer(containerID)
-	if err != nil {
-		return -1, err
-	}
-
-	spec, err := container.spec()
-	if err != nil {
-		return -1, err
-	}
-	sp := spec.Process
-	sp.Args = specp.Args
-	sp.Terminal = specp.Terminal
-	if len(specp.Env) > 0 {
-		sp.Env = specp.Env
-	}
-	if specp.Cwd != nil {
-		sp.Cwd = *specp.Cwd
-	}
-	if specp.User != nil {
-		sp.User = specs.User{
-			UID:            specp.User.UID,
-			GID:            specp.User.GID,
-			AdditionalGids: specp.User.AdditionalGids,
-		}
-	}
-	if specp.Capabilities != nil {
-		sp.Capabilities.Bounding = specp.Capabilities
-		sp.Capabilities.Effective = specp.Capabilities
-		sp.Capabilities.Inheritable = specp.Capabilities
-		sp.Capabilities.Permitted = specp.Capabilities
-	}
-
-	p := container.newProcess(processFriendlyName)
-
-	r := &containerd.AddProcessRequest{
-		Args:     sp.Args,
-		Cwd:      sp.Cwd,
-		Terminal: sp.Terminal,
-		Id:       containerID,
-		Env:      sp.Env,
-		User: &containerd.User{
-			Uid:            sp.User.UID,
-			Gid:            sp.User.GID,
-			AdditionalGids: sp.User.AdditionalGids,
-		},
-		Pid:             processFriendlyName,
-		Stdin:           p.fifo(unix.Stdin),
-		Stdout:          p.fifo(unix.Stdout),
-		Stderr:          p.fifo(unix.Stderr),
-		Capabilities:    sp.Capabilities.Effective,
-		ApparmorProfile: sp.ApparmorProfile,
-		SelinuxLabel:    sp.SelinuxLabel,
-		NoNewPrivileges: sp.NoNewPrivileges,
-		Rlimits:         convertRlimits(sp.Rlimits),
-	}
-
-	fifoCtx, cancel := context.WithCancel(context.Background())
-	defer func() {
-		if err != nil {
-			cancel()
-		}
-	}()
-
-	iopipe, err := p.openFifos(fifoCtx, sp.Terminal)
-	if err != nil {
-		return -1, err
-	}
-
-	resp, err := clnt.remote.apiClient.AddProcess(ctx, r)
-	if err != nil {
-		p.closeFifos(iopipe)
-		return -1, err
-	}
-
-	var stdinOnce sync.Once
-	stdin := iopipe.Stdin
-	iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error {
-		var err error
-		stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed
-			err = stdin.Close()
-			if err2 := p.sendCloseStdin(); err == nil {
-				err = err2
-			}
-		})
-		return err
-	})
-
-	container.processes[processFriendlyName] = p
-
-	if err := attachStdio(*iopipe); err != nil {
-		p.closeFifos(iopipe)
-		return -1, err
-	}
-
-	return int(resp.SystemPid), nil
-}
-
-func (clnt *client) SignalProcess(containerID string, pid string, sig int) error {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	_, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{
-		Id:     containerID,
-		Pid:    pid,
-		Signal: uint32(sig),
-	})
-	return err
-}
-
-func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	if _, err := clnt.getContainer(containerID); err != nil {
-		return err
-	}
-	_, err := clnt.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{
-		Id:     containerID,
-		Pid:    processFriendlyName,
-		Width:  uint32(width),
-		Height: uint32(height),
-	})
-	return err
-}
-
-func (clnt *client) Pause(containerID string) error {
-	return clnt.setState(containerID, StatePause)
-}
-
-func (clnt *client) setState(containerID, state string) error {
-	clnt.lock(containerID)
-	container, err := clnt.getContainer(containerID)
-	if err != nil {
-		clnt.unlock(containerID)
-		return err
-	}
-	if container.systemPid == 0 {
-		clnt.unlock(containerID)
-		return fmt.Errorf("No active process for container %s", containerID)
-	}
-	st := "running"
-	if state == StatePause {
-		st = "paused"
-	}
-	chstate := make(chan struct{})
-	_, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{
-		Id:     containerID,
-		Pid:    InitFriendlyName,
-		Status: st,
-	})
-	if err != nil {
-		clnt.unlock(containerID)
-		return err
-	}
-	container.pauseMonitor.append(state, chstate)
-	clnt.unlock(containerID)
-	<-chstate
-	return nil
-}
-
-func (clnt *client) Resume(containerID string) error {
-	return clnt.setState(containerID, StateResume)
-}
-
-func (clnt *client) Stats(containerID string) (*Stats, error) {
-	resp, err := clnt.remote.apiClient.Stats(context.Background(), &containerd.StatsRequest{containerID})
-	if err != nil {
-		return nil, err
-	}
-	return (*Stats)(resp), nil
-}
-
-// Take care of the old 1.11.0 behavior in case the version upgrade
-// happened without a clean daemon shutdown
-func (clnt *client) cleanupOldRootfs(containerID string) {
-	// Unmount and delete the bundle folder
-	if mts, err := mount.GetMounts(); err == nil {
-		for _, mts := range mts {
-			if strings.HasSuffix(mts.Mountpoint, containerID+"/rootfs") {
-				if err := unix.Unmount(mts.Mountpoint, unix.MNT_DETACH); err == nil {
-					os.RemoveAll(strings.TrimSuffix(mts.Mountpoint, "/rootfs"))
-				}
-				break
-			}
-		}
-	}
-}
-
-func (clnt *client) setExited(containerID string, exitCode uint32) error {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-
-	err := clnt.backend.StateChanged(containerID, StateInfo{
-		CommonStateInfo: CommonStateInfo{
-			State:    StateExit,
-			ExitCode: exitCode,
-		}})
-
-	clnt.cleanupOldRootfs(containerID)
-
-	return err
-}
-
-func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
-	cont, err := clnt.getContainerdContainer(containerID)
-	if err != nil {
-		return nil, err
-	}
-	pids := make([]int, len(cont.Pids))
-	for i, p := range cont.Pids {
-		pids[i] = int(p)
-	}
-	return pids, nil
-}
-
-// Summary returns a summary of the processes running in a container.
-// This is a no-op on Linux.
-func (clnt *client) Summary(containerID string) ([]Summary, error) {
-	return nil, nil
-}
-
-func (clnt *client) getContainerdContainer(containerID string) (*containerd.Container, error) {
-	resp, err := clnt.remote.apiClient.State(context.Background(), &containerd.StateRequest{Id: containerID})
-	if err != nil {
-		return nil, err
-	}
-	for _, cont := range resp.Containers {
-		if cont.Id == containerID {
-			return cont, nil
-		}
-	}
-	return nil, fmt.Errorf("invalid state response")
-}
-
-func (clnt *client) UpdateResources(containerID string, resources Resources) error {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	container, err := clnt.getContainer(containerID)
-	if err != nil {
-		return err
-	}
-	if container.systemPid == 0 {
-		return fmt.Errorf("No active process for container %s", containerID)
-	}
-	_, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{
-		Id:        containerID,
-		Pid:       InitFriendlyName,
-		Resources: (*containerd.UpdateResource)(&resources),
-	})
-	if err != nil {
-		return err
-	}
-	return nil
-}
-
-func (clnt *client) getExitNotifier(containerID string) *exitNotifier {
-	clnt.mapMutex.RLock()
-	defer clnt.mapMutex.RUnlock()
-	return clnt.exitNotifiers[containerID]
-}
-
-func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier {
-	clnt.mapMutex.Lock()
-	w, ok := clnt.exitNotifiers[containerID]
-	defer clnt.mapMutex.Unlock()
-	if !ok {
-		w = &exitNotifier{c: make(chan struct{}), client: clnt}
-		clnt.exitNotifiers[containerID] = w
-	}
-	return w
-}
-
-func (clnt *client) restore(cont *containerd.Container, lastEvent *containerd.Event, attachStdio StdioCallback, options ...CreateOption) (err error) {
-	clnt.lock(cont.Id)
-	defer clnt.unlock(cont.Id)
-
-	logrus.Debugf("libcontainerd: restore container %s state %s", cont.Id, cont.Status)
-
-	containerID := cont.Id
-	if _, err := clnt.getContainer(containerID); err == nil {
-		return fmt.Errorf("container %s is already active", containerID)
-	}
-
-	defer func() {
-		if err != nil {
-			clnt.deleteContainer(cont.Id)
-		}
-	}()
-
-	container := clnt.newContainer(cont.BundlePath, options...)
-	container.systemPid = systemPid(cont)
-
-	var terminal bool
-	for _, p := range cont.Processes {
-		if p.Pid == InitFriendlyName {
-			terminal = p.Terminal
-		}
-	}
-
-	fifoCtx, cancel := context.WithCancel(context.Background())
-	defer func() {
-		if err != nil {
-			cancel()
-		}
-	}()
-
-	iopipe, err := container.openFifos(fifoCtx, terminal)
-	if err != nil {
-		return err
-	}
-	var stdinOnce sync.Once
-	stdin := iopipe.Stdin
-	iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error {
-		var err error
-		stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed
-			err = stdin.Close()
-		})
-		return err
-	})
-
-	if err := attachStdio(*iopipe); err != nil {
-		container.closeFifos(iopipe)
-		return err
-	}
-
-	clnt.appendContainer(container)
-
-	err = clnt.backend.StateChanged(containerID, StateInfo{
-		CommonStateInfo: CommonStateInfo{
-			State: StateRestore,
-			Pid:   container.systemPid,
-		}})
-
-	if err != nil {
-		container.closeFifos(iopipe)
-		return err
-	}
-
-	if lastEvent != nil {
-		// This should only be a pause or resume event
-		if lastEvent.Type == StatePause || lastEvent.Type == StateResume {
-			return clnt.backend.StateChanged(containerID, StateInfo{
-				CommonStateInfo: CommonStateInfo{
-					State: lastEvent.Type,
-					Pid:   container.systemPid,
-				}})
-		}
-
-		logrus.Warnf("libcontainerd: unexpected backlog event: %#v", lastEvent)
-	}
-
-	return nil
-}
-
-func (clnt *client) getContainerLastEventSinceTime(id string, tsp *timestamp.Timestamp) (*containerd.Event, error) {
-	er := &containerd.EventsRequest{
-		Timestamp:  tsp,
-		StoredOnly: true,
-		Id:         id,
-	}
-	events, err := clnt.remote.apiClient.Events(context.Background(), er)
-	if err != nil {
-		logrus.Errorf("libcontainerd: failed to get container events stream for %s: %q", er.Id, err)
-		return nil, err
-	}
-
-	var ev *containerd.Event
-	for {
-		e, err := events.Recv()
-		if err != nil {
-			if err.Error() == "EOF" {
-				break
-			}
-			logrus.Errorf("libcontainerd: failed to get container event for %s: %q", id, err)
-			return nil, err
-		}
-		ev = e
-		logrus.Debugf("libcontainerd: received past event %#v", ev)
-	}
-
-	return ev, nil
-}
-
-func (clnt *client) getContainerLastEvent(id string) (*containerd.Event, error) {
-	ev, err := clnt.getContainerLastEventSinceTime(id, clnt.remote.restoreFromTimestamp)
-	if err == nil && ev == nil {
-		// If ev is nil and the container is running in containerd,
-		// we already consumed all the event of the
-		// container, included the "exit" one.
-		// Thus, we request all events containerd has in memory for
-		// this container in order to get the last one (which should
-		// be an exit event)
-		logrus.Warnf("libcontainerd: client is out of sync, restore was called on a fully synced container (%s).", id)
-		// Request all events since beginning of time
-		t := time.Unix(0, 0)
-		tsp, err := ptypes.TimestampProto(t)
-		if err != nil {
-			logrus.Errorf("libcontainerd: getLastEventSinceTime() failed to convert timestamp: %q", err)
-			return nil, err
-		}
-
-		return clnt.getContainerLastEventSinceTime(id, tsp)
-	}
-
-	return ev, err
-}
-
-func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error {
-	// Synchronize with live events
-	clnt.remote.Lock()
-	defer clnt.remote.Unlock()
-	// Check that containerd still knows this container.
-	//
-	// In the unlikely event that Restore for this container process
-	// the its past event before the main loop, the event will be
-	// processed twice. However, this is not an issue as all those
-	// events will do is change the state of the container to be
-	// exactly the same.
-	cont, err := clnt.getContainerdContainer(containerID)
-	// Get its last event
-	ev, eerr := clnt.getContainerLastEvent(containerID)
-	if err != nil || containerd_runtime_types.State(cont.Status) == containerd_runtime_types.Stopped {
-		if err != nil {
-			logrus.Warnf("libcontainerd: failed to retrieve container %s state: %v", containerID, err)
-		}
-		if ev != nil && (ev.Pid != InitFriendlyName || ev.Type != StateExit) {
-			// Wait a while for the exit event
-			timeout := time.NewTimer(10 * time.Second)
-			tick := time.NewTicker(100 * time.Millisecond)
-		stop:
-			for {
-				select {
-				case <-timeout.C:
-					break stop
-				case <-tick.C:
-					ev, eerr = clnt.getContainerLastEvent(containerID)
-					if eerr != nil {
-						break stop
-					}
-					if ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit {
-						break stop
-					}
-				}
-			}
-			timeout.Stop()
-			tick.Stop()
-		}
-
-		// get the exit status for this container, if we don't have
-		// one, indicate an error
-		ec := uint32(255)
-		if eerr == nil && ev != nil && ev.Pid == InitFriendlyName && ev.Type == StateExit {
-			ec = ev.Status
-		}
-		clnt.setExited(containerID, ec)
-
-		return nil
-	}
-
-	// container is still alive
-	if clnt.liveRestore {
-		if err := clnt.restore(cont, ev, attachStdio, options...); err != nil {
-			logrus.Errorf("libcontainerd: error restoring %s: %v", containerID, err)
-		}
-		return nil
-	}
-
-	// Kill the container if liveRestore == false
-	w := clnt.getOrCreateExitNotifier(containerID)
-	clnt.lock(cont.Id)
-	container := clnt.newContainer(cont.BundlePath)
-	container.systemPid = systemPid(cont)
-	clnt.appendContainer(container)
-	clnt.unlock(cont.Id)
-
-	container.discardFifos()
-
-	if err := clnt.Signal(containerID, int(unix.SIGTERM)); err != nil {
-		logrus.Errorf("libcontainerd: error sending sigterm to %v: %v", containerID, err)
-	}
-
-	// Let the main loop handle the exit event
-	clnt.remote.Unlock()
-
-	if ev != nil && ev.Type == StatePause {
-		// resume container, it depends on the main loop, so we do it after Unlock()
-		logrus.Debugf("libcontainerd: %s was paused, resuming it so it can die", containerID)
-		if err := clnt.Resume(containerID); err != nil {
-			return fmt.Errorf("failed to resume container: %v", err)
-		}
-	}
-
-	select {
-	case <-time.After(10 * time.Second):
-		if err := clnt.Signal(containerID, int(unix.SIGKILL)); err != nil {
-			logrus.Errorf("libcontainerd: error sending sigkill to %v: %v", containerID, err)
-		}
-		select {
-		case <-time.After(2 * time.Second):
-		case <-w.wait():
-			// relock because of the defer
-			clnt.remote.Lock()
-			return nil
-		}
-	case <-w.wait():
-		// relock because of the defer
-		clnt.remote.Lock()
-		return nil
-	}
-	// relock because of the defer
-	clnt.remote.Lock()
-
-	clnt.deleteContainer(containerID)
-
-	return clnt.setExited(containerID, uint32(255))
-}
-
-func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	if _, err := clnt.getContainer(containerID); err != nil {
-		return err
-	}
-
-	_, err := clnt.remote.apiClient.CreateCheckpoint(context.Background(), &containerd.CreateCheckpointRequest{
-		Id: containerID,
-		Checkpoint: &containerd.Checkpoint{
-			Name:        checkpointID,
-			Exit:        exit,
-			Tcp:         true,
-			UnixSockets: true,
-			Shell:       false,
-			EmptyNS:     []string{"network"},
-		},
-		CheckpointDir: checkpointDir,
-	})
-	return err
-}
-
-func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	if _, err := clnt.getContainer(containerID); err != nil {
-		return err
-	}
-
-	_, err := clnt.remote.apiClient.DeleteCheckpoint(context.Background(), &containerd.DeleteCheckpointRequest{
-		Id:            containerID,
-		Name:          checkpointID,
-		CheckpointDir: checkpointDir,
-	})
-	return err
-}
-
-func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	if _, err := clnt.getContainer(containerID); err != nil {
-		return nil, err
-	}
-
-	resp, err := clnt.remote.apiClient.ListCheckpoint(context.Background(), &containerd.ListCheckpointRequest{
-		Id:            containerID,
-		CheckpointDir: checkpointDir,
-	})
-	if err != nil {
-		return nil, err
-	}
-	return (*Checkpoints)(resp), nil
-}
diff --git a/libcontainerd/client_local_windows.go b/libcontainerd/client_local_windows.go
new file mode 100644
index 0000000..8ce9dfe
--- /dev/null
+++ b/libcontainerd/client_local_windows.go
@@ -0,0 +1,1345 @@
+package libcontainerd
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/Microsoft/hcsshim"
+	opengcs "github.com/Microsoft/opengcs/client"
+	"github.com/containerd/containerd"
+	"github.com/docker/docker/pkg/sysinfo"
+	"github.com/docker/docker/pkg/system"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/windows"
+)
+
+const InitProcessName = "init"
+
+type process struct {
+	id         string
+	pid        int
+	hcsProcess hcsshim.Process
+}
+
+type container struct {
+	sync.Mutex
+
+	// The ociSpec is required, as client.Create() needs a spec, but can
+	// be called from the RestartManager context which does not otherwise
+	// have access to the Spec
+	ociSpec *specs.Spec
+
+	isWindows           bool
+	manualStopRequested bool
+	hcsContainer        hcsshim.Container
+
+	id            string
+	status        Status
+	exitedAt      time.Time
+	exitCode      uint32
+	waitCh        chan struct{}
+	init          *process
+	execs         map[string]*process
+	updatePending bool
+}
+
+// Win32 error codes that are used for various workarounds
+// These really should be ALL_CAPS to match golangs syscall library and standard
+// Win32 error conventions, but golint insists on CamelCase.
+const (
+	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
+	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
+	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
+	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
+)
+
+// defaultOwner is a tag passed to HCS to allow it to differentiate between
+// container creator management stacks. We hard code "docker" in the case
+// of docker.
+const defaultOwner = "docker"
+
+func (c *client) Version(ctx context.Context) (containerd.Version, error) {
+	return containerd.Version{}, errors.New("not implemented on Windows")
+}
+
+// Create is the entrypoint to create a container from a spec.
+// Table below shows the fields required for HCS JSON calling parameters,
+// where if not populated, is omitted.
+// +-----------------+--------------------------------------------+---------------------------------------------------+
+// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
+// +-----------------+--------------------------------------------+---------------------------------------------------+
+// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
+// | LayerFolderPath | %root%\windowsfilter\containerID           | %root%\windowsfilter\containerID (servicing only) |
+// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
+// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
+// +-----------------+--------------------------------------------+---------------------------------------------------+
+//
+// Isolation=Process example:
+//
+// {
+//	"SystemType": "Container",
+//	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
+//	"Owner": "docker",
+//	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
+//	"IgnoreFlushesDuringBoot": true,
+//	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
+//	"Layers": [{
+//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
+//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
+//	}],
+//	"HostName": "5e0055c814a6",
+//	"MappedDirectories": [],
+//	"HvPartition": false,
+//	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
+//	"Servicing": false
+//}
+//
+// Isolation=Hyper-V example:
+//
+//{
+//	"SystemType": "Container",
+//	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
+//	"Owner": "docker",
+//	"IgnoreFlushesDuringBoot": true,
+//	"Layers": [{
+//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
+//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
+//	}],
+//	"HostName": "475c2c58933b",
+//	"MappedDirectories": [],
+//	"HvPartition": true,
+//	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
+//	"DNSSearchList": "a.com,b.com,c.com",
+//	"HvRuntime": {
+//		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
+//	},
+//	"Servicing": false
+//}
+func (c *client) Create(_ context.Context, id string, spec *specs.Spec, runtimeOptions interface{}) error {
+	if ctr := c.getContainer(id); ctr != nil {
+		return errors.WithStack(newConflictError("id already in use"))
+	}
+
+	// spec.Linux must be nil for Windows containers, but spec.Windows
+	// will be filled in regardless of container platform.  This is a
+	// temporary workaround due to LCOW requiring layer folder paths,
+	// which are stored under spec.Windows.
+	//
+	// TODO: @darrenstahlmsft fix this once the OCI spec is updated to
+	// support layer folder paths for LCOW
+	if spec.Linux == nil {
+		return c.createWindows(id, spec, runtimeOptions)
+	}
+	return c.createLinux(id, spec, runtimeOptions)
+}
+
+func (c *client) createWindows(id string, spec *specs.Spec, runtimeOptions interface{}) error {
+	logger := c.logger.WithField("container", id)
+	configuration := &hcsshim.ContainerConfig{
+		SystemType: "Container",
+		Name:       id,
+		Owner:      defaultOwner,
+		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
+		HostName:                spec.Hostname,
+		HvPartition:             false,
+		Servicing:               spec.Windows.Servicing,
+	}
+
+	if spec.Windows.Resources != nil {
+		if spec.Windows.Resources.CPU != nil {
+			if spec.Windows.Resources.CPU.Count != nil {
+				// This check is being done here rather than in adaptContainerSettings
+				// because we don't want to update the HostConfig in case this container
+				// is moved to a host with more CPUs than this one.
+				cpuCount := *spec.Windows.Resources.CPU.Count
+				hostCPUCount := uint64(sysinfo.NumCPU())
+				if cpuCount > hostCPUCount {
+					c.logger.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
+					cpuCount = hostCPUCount
+				}
+				configuration.ProcessorCount = uint32(cpuCount)
+			}
+			if spec.Windows.Resources.CPU.Shares != nil {
+				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
+			}
+			if spec.Windows.Resources.CPU.Maximum != nil {
+				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
+			}
+		}
+		if spec.Windows.Resources.Memory != nil {
+			if spec.Windows.Resources.Memory.Limit != nil {
+				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
+			}
+		}
+		if spec.Windows.Resources.Storage != nil {
+			if spec.Windows.Resources.Storage.Bps != nil {
+				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
+			}
+			if spec.Windows.Resources.Storage.Iops != nil {
+				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
+			}
+		}
+	}
+
+	if spec.Windows.HyperV != nil {
+		configuration.HvPartition = true
+	}
+
+	if spec.Windows.Network != nil {
+		configuration.EndpointList = spec.Windows.Network.EndpointList
+		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
+		if spec.Windows.Network.DNSSearchList != nil {
+			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
+		}
+		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
+	}
+
+	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
+		configuration.Credentials = cs
+	}
+
+	// We must have least two layers in the spec, the bottom one being a
+	// base image, the top one being the RW layer.
+	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
+		return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
+	}
+
+	// Strip off the top-most layer as that's passed in separately to HCS
+	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
+	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
+
+	if configuration.HvPartition {
+		// We don't currently support setting the utility VM image explicitly.
+		// TODO @swernli/jhowardmsft circa RS3/4, this may be re-locatable.
+		if spec.Windows.HyperV.UtilityVMPath != "" {
+			return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
+		}
+
+		// Find the upper-most utility VM image.
+		var uvmImagePath string
+		for _, path := range layerFolders {
+			fullPath := filepath.Join(path, "UtilityVM")
+			_, err := os.Stat(fullPath)
+			if err == nil {
+				uvmImagePath = fullPath
+				break
+			}
+			if !os.IsNotExist(err) {
+				return err
+			}
+		}
+		if uvmImagePath == "" {
+			return errors.New("utility VM image could not be found")
+		}
+		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
+
+		if spec.Root.Path != "" {
+			return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
+		}
+	} else {
+		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
+		if _, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil {
+			return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
+		}
+		// HCS API requires the trailing backslash to be removed
+		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
+	}
+
+	if spec.Root.Readonly {
+		return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
+	}
+
+	for _, layerPath := range layerFolders {
+		_, filename := filepath.Split(layerPath)
+		g, err := hcsshim.NameToGuid(filename)
+		if err != nil {
+			return err
+		}
+		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
+			ID:   g.ToString(),
+			Path: layerPath,
+		})
+	}
+
+	// Add the mounts (volumes, bind mounts etc) to the structure
+	var mds []hcsshim.MappedDir
+	var mps []hcsshim.MappedPipe
+	for _, mount := range spec.Mounts {
+		const pipePrefix = `\\.\pipe\`
+		if mount.Type != "" {
+			return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
+		}
+		if strings.HasPrefix(mount.Destination, pipePrefix) {
+			mp := hcsshim.MappedPipe{
+				HostPath:          mount.Source,
+				ContainerPipeName: mount.Destination[len(pipePrefix):],
+			}
+			mps = append(mps, mp)
+		} else {
+			md := hcsshim.MappedDir{
+				HostPath:      mount.Source,
+				ContainerPath: mount.Destination,
+				ReadOnly:      false,
+			}
+			for _, o := range mount.Options {
+				if strings.ToLower(o) == "ro" {
+					md.ReadOnly = true
+				}
+			}
+			mds = append(mds, md)
+		}
+	}
+	configuration.MappedDirectories = mds
+	if len(mps) > 0 && system.GetOSVersion().Build < 16210 { // replace with Win10 RS3 build number at RTM
+		return errors.New("named pipe mounts are not supported on this version of Windows")
+	}
+	configuration.MappedPipes = mps
+
+	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
+	if err != nil {
+		return err
+	}
+
+	// Construct a container object for calling start on it.
+	ctr := &container{
+		id:           id,
+		execs:        make(map[string]*process),
+		isWindows:    true,
+		ociSpec:      spec,
+		hcsContainer: hcsContainer,
+		status:       StatusCreated,
+		waitCh:       make(chan struct{}),
+	}
+
+	// Start the container. If this is a servicing container, this call
+	// will block until the container is done with the servicing
+	// execution.
+	logger.Debug("starting container")
+	if err = hcsContainer.Start(); err != nil {
+		c.logger.WithError(err).Error("failed to start container")
+		ctr.debugGCS()
+		if err := c.terminateContainer(ctr); err != nil {
+			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
+		} else {
+			c.logger.Debug("cleaned up after failed Start by calling Terminate")
+		}
+		return err
+	}
+	ctr.debugGCS()
+
+	c.Lock()
+	c.containers[id] = ctr
+	c.Unlock()
+
+	logger.Debug("createWindows() completed successfully")
+	return nil
+
+}
+
+func (c *client) createLinux(id string, spec *specs.Spec, runtimeOptions interface{}) error {
+	logrus.Debugf("libcontainerd: createLinux(): containerId %s ", id)
+	logger := c.logger.WithField("container", id)
+
+	if runtimeOptions == nil {
+		return fmt.Errorf("lcow option must be supplied to the runtime")
+	}
+	lcowConfig, ok := runtimeOptions.(*opengcs.Config)
+	if !ok {
+		return fmt.Errorf("lcow option must be supplied to the runtime")
+	}
+
+	configuration := &hcsshim.ContainerConfig{
+		HvPartition:   true,
+		Name:          id,
+		SystemType:    "container",
+		ContainerType: "linux",
+		Owner:         defaultOwner,
+		TerminateOnLastHandleClosed: true,
+	}
+
+	if lcowConfig.ActualMode == opengcs.ModeActualVhdx {
+		configuration.HvRuntime = &hcsshim.HvRuntime{
+			ImagePath:          lcowConfig.Vhdx,
+			BootSource:         "Vhd",
+			WritableBootSource: false,
+		}
+	} else {
+		configuration.HvRuntime = &hcsshim.HvRuntime{
+			ImagePath:           lcowConfig.KirdPath,
+			LinuxKernelFile:     lcowConfig.KernelFile,
+			LinuxInitrdFile:     lcowConfig.InitrdFile,
+			LinuxBootParameters: lcowConfig.BootParameters,
+		}
+	}
+
+	if spec.Windows == nil {
+		return fmt.Errorf("spec.Windows must not be nil for LCOW containers")
+	}
+
+	// We must have least one layer in the spec
+	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) == 0 {
+		return fmt.Errorf("OCI spec is invalid - at least one LayerFolders must be supplied to the runtime")
+	}
+
+	// Strip off the top-most layer as that's passed in separately to HCS
+	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
+	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
+
+	for _, layerPath := range layerFolders {
+		_, filename := filepath.Split(layerPath)
+		g, err := hcsshim.NameToGuid(filename)
+		if err != nil {
+			return err
+		}
+		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
+			ID:   g.ToString(),
+			Path: filepath.Join(layerPath, "layer.vhd"),
+		})
+	}
+
+	if spec.Windows.Network != nil {
+		configuration.EndpointList = spec.Windows.Network.EndpointList
+		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
+		if spec.Windows.Network.DNSSearchList != nil {
+			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
+		}
+		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
+	}
+
+	// Add the mounts (volumes, bind mounts etc) to the structure. We have to do
+	// some translation for both the mapped directories passed into HCS and in
+	// the spec.
+	//
+	// For HCS, we only pass in the mounts from the spec which are type "bind".
+	// Further, the "ContainerPath" field (which is a little mis-leadingly
+	// named when it applies to the utility VM rather than the container in the
+	// utility VM) is moved to under /tmp/gcs/<ID>/binds, where this is passed
+	// by the caller through a 'uvmpath' option.
+	//
+	// We do similar translation for the mounts in the spec by stripping out
+	// the uvmpath option, and translating the Source path to the location in the
+	// utility VM calculated above.
+	//
+	// From inside the utility VM, you would see a 9p mount such as in the following
+	// where a host folder has been mapped to /target. The line with /tmp/gcs/<ID>/binds
+	// specifically:
+	//
+	//	/ # mount
+	//	rootfs on / type rootfs (rw,size=463736k,nr_inodes=115934)
+	//	proc on /proc type proc (rw,relatime)
+	//	sysfs on /sys type sysfs (rw,relatime)
+	//	udev on /dev type devtmpfs (rw,relatime,size=498100k,nr_inodes=124525,mode=755)
+	//	tmpfs on /run type tmpfs (rw,relatime)
+	//	cgroup on /sys/fs/cgroup type cgroup (rw,relatime,cpuset,cpu,cpuacct,blkio,memory,devices,freezer,net_cls,perf_event,net_prio,hugetlb,pids,rdma)
+	//	mqueue on /dev/mqueue type mqueue (rw,relatime)
+	//	devpts on /dev/pts type devpts (rw,relatime,mode=600,ptmxmode=000)
+	//	/binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target on /binds/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/target type 9p (rw,sync,dirsync,relatime,trans=fd,rfdno=6,wfdno=6)
+	//	/dev/pmem0 on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0 type ext4 (ro,relatime,block_validity,delalloc,norecovery,barrier,dax,user_xattr,acl)
+	//	/dev/sda on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch type ext4 (rw,relatime,block_validity,delalloc,barrier,user_xattr,acl)
+	//	overlay on /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/rootfs type overlay (rw,relatime,lowerdir=/tmp/base/:/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/layer0,upperdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/upper,workdir=/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc/scratch/work)
+	//
+	//  /tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l
+	//	total 16
+	//	drwx------    3 0        0               60 Sep  7 18:54 binds
+	//	-rw-r--r--    1 0        0             3345 Sep  7 18:54 config.json
+	//	drwxr-xr-x   10 0        0             4096 Sep  6 17:26 layer0
+	//	drwxr-xr-x    1 0        0             4096 Sep  7 18:54 rootfs
+	//	drwxr-xr-x    5 0        0             4096 Sep  7 18:54 scratch
+	//
+	//	/tmp/gcs/b3ea9126d67702173647ece2744f7c11181c0150e9890fc9a431849838033edc # ls -l binds
+	//	total 0
+	//	drwxrwxrwt    2 0        0             4096 Sep  7 16:51 target
+
+	mds := []hcsshim.MappedDir{}
+	specMounts := []specs.Mount{}
+	for _, mount := range spec.Mounts {
+		specMount := mount
+		if mount.Type == "bind" {
+			// Strip out the uvmpath from the options
+			updatedOptions := []string{}
+			uvmPath := ""
+			readonly := false
+			for _, opt := range mount.Options {
+				dropOption := false
+				elements := strings.SplitN(opt, "=", 2)
+				switch elements[0] {
+				case "uvmpath":
+					uvmPath = elements[1]
+					dropOption = true
+				case "rw":
+				case "ro":
+					readonly = true
+				case "rbind":
+				default:
+					return fmt.Errorf("unsupported option %q", opt)
+				}
+				if !dropOption {
+					updatedOptions = append(updatedOptions, opt)
+				}
+			}
+			mount.Options = updatedOptions
+			if uvmPath == "" {
+				return fmt.Errorf("no uvmpath for bind mount %+v", mount)
+			}
+			md := hcsshim.MappedDir{
+				HostPath:          mount.Source,
+				ContainerPath:     path.Join(uvmPath, mount.Destination),
+				CreateInUtilityVM: true,
+				ReadOnly:          readonly,
+			}
+			mds = append(mds, md)
+			specMount.Source = path.Join(uvmPath, mount.Destination)
+		}
+		specMounts = append(specMounts, specMount)
+	}
+	configuration.MappedDirectories = mds
+
+	hcsContainer, err := hcsshim.CreateContainer(id, configuration)
+	if err != nil {
+		return err
+	}
+
+	spec.Mounts = specMounts
+
+	// Construct a container object for calling start on it.
+	ctr := &container{
+		id:           id,
+		execs:        make(map[string]*process),
+		isWindows:    false,
+		ociSpec:      spec,
+		hcsContainer: hcsContainer,
+		status:       StatusCreated,
+		waitCh:       make(chan struct{}),
+	}
+
+	// Start the container. If this is a servicing container, this call
+	// will block until the container is done with the servicing
+	// execution.
+	logger.Debug("starting container")
+	if err = hcsContainer.Start(); err != nil {
+		c.logger.WithError(err).Error("failed to start container")
+		ctr.debugGCS()
+		if err := c.terminateContainer(ctr); err != nil {
+			c.logger.WithError(err).Error("failed to cleanup after a failed Start")
+		} else {
+			c.logger.Debug("cleaned up after failed Start by calling Terminate")
+		}
+		return err
+	}
+	ctr.debugGCS()
+
+	c.Lock()
+	c.containers[id] = ctr
+	c.Unlock()
+
+	c.eventQ.append(id, func() {
+		ei := EventInfo{
+			ContainerID: id,
+		}
+		c.logger.WithFields(logrus.Fields{
+			"container": ctr.id,
+			"event":     EventCreate,
+		}).Info("sending event")
+		err := c.backend.ProcessEvent(id, EventCreate, ei)
+		if err != nil {
+			c.logger.WithError(err).WithFields(logrus.Fields{
+				"container": id,
+				"event":     EventCreate,
+			}).Error("failed to process event")
+		}
+	})
+
+	logger.Debug("createLinux() completed successfully")
+	return nil
+}
+
+func (c *client) Start(_ context.Context, id, _ string, withStdin bool, attachStdio StdioCallback) (int, error) {
+	ctr := c.getContainer(id)
+	switch {
+	case ctr == nil:
+		return -1, errors.WithStack(newNotFoundError("no such container"))
+	case ctr.init != nil:
+		return -1, errors.WithStack(newConflictError("container already started"))
+	}
+
+	logger := c.logger.WithField("container", id)
+
+	// Note we always tell HCS to create stdout as it's required
+	// regardless of '-i' or '-t' options, so that docker can always grab
+	// the output through logs. We also tell HCS to always create stdin,
+	// even if it's not used - it will be closed shortly. Stderr is only
+	// created if we're not -t.
+	var (
+		emulateConsole   bool
+		createStdErrPipe bool
+	)
+	if ctr.ociSpec.Process != nil {
+		emulateConsole = ctr.ociSpec.Process.Terminal
+		createStdErrPipe = !ctr.ociSpec.Process.Terminal && !ctr.ociSpec.Windows.Servicing
+	}
+
+	createProcessParms := &hcsshim.ProcessConfig{
+		EmulateConsole:   emulateConsole,
+		WorkingDirectory: ctr.ociSpec.Process.Cwd,
+		CreateStdInPipe:  !ctr.ociSpec.Windows.Servicing,
+		CreateStdOutPipe: !ctr.ociSpec.Windows.Servicing,
+		CreateStdErrPipe: createStdErrPipe,
+	}
+
+	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
+		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
+		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
+	}
+
+	// Configure the environment for the process
+	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
+	if ctr.isWindows {
+		createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ")
+	} else {
+		createProcessParms.CommandArgs = ctr.ociSpec.Process.Args
+	}
+	createProcessParms.User = ctr.ociSpec.Process.User.Username
+
+	// LCOW requires the raw OCI spec passed through HCS and onwards to
+	// GCS for the utility VM.
+	if !ctr.isWindows {
+		ociBuf, err := json.Marshal(ctr.ociSpec)
+		if err != nil {
+			return -1, err
+		}
+		ociRaw := json.RawMessage(ociBuf)
+		createProcessParms.OCISpecification = &ociRaw
+	}
+
+	ctr.Lock()
+	defer ctr.Unlock()
+
+	// Start the command running in the container.
+	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
+	if err != nil {
+		logger.WithError(err).Error("CreateProcess() failed")
+		return -1, err
+	}
+	defer func() {
+		if err != nil {
+			if err := newProcess.Kill(); err != nil {
+				logger.WithError(err).Error("failed to kill process")
+			}
+			go func() {
+				if err := newProcess.Wait(); err != nil {
+					logger.WithError(err).Error("failed to wait for process")
+				}
+				if err := newProcess.Close(); err != nil {
+					logger.WithError(err).Error("failed to clean process resources")
+				}
+			}()
+		}
+	}()
+	p := &process{
+		hcsProcess: newProcess,
+		id:         InitProcessName,
+		pid:        newProcess.Pid(),
+	}
+	logger.WithField("pid", p.pid).Debug("init process started")
+
+	// If this is a servicing container, wait on the process synchronously here and
+	// if it succeeds, wait for it to cleanly shut down and merge into the parent container.
+	if ctr.ociSpec.Windows.Servicing {
+		// reapProcess takes the lock
+		ctr.Unlock()
+		defer ctr.Lock()
+		exitCode := c.reapProcess(ctr, p)
+
+		if exitCode != 0 {
+			return -1, errors.Errorf("libcontainerd: servicing container %s returned non-zero exit code %d", ctr.id, exitCode)
+		}
+
+		return p.pid, nil
+	}
+
+	var (
+		stdout, stderr io.ReadCloser
+		stdin          io.WriteCloser
+	)
+	stdin, stdout, stderr, err = newProcess.Stdio()
+	if err != nil {
+		logger.WithError(err).Error("failed to get stdio pipes")
+		return -1, err
+	}
+
+	iopipe := &IOPipe{Terminal: ctr.ociSpec.Process.Terminal}
+	iopipe.Stdin = createStdInCloser(stdin, newProcess)
+
+	// Convert io.ReadClosers to io.Readers
+	if stdout != nil {
+		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
+	}
+	if stderr != nil {
+		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
+	}
+
+	_, err = attachStdio(iopipe)
+	if err != nil {
+		logger.WithError(err).Error("failed to attach stdio")
+		return -1, err
+	}
+	ctr.status = StatusRunning
+	ctr.init = p
+
+	// Spin up a go routine waiting for exit to handle cleanup
+	go c.reapProcess(ctr, p)
+
+	// Generate the associated event
+	c.eventQ.append(id, func() {
+		ei := EventInfo{
+			ContainerID: id,
+			ProcessID:   InitProcessName,
+			Pid:         uint32(p.pid),
+		}
+		c.logger.WithFields(logrus.Fields{
+			"container":  ctr.id,
+			"event":      EventStart,
+			"event-info": ei,
+		}).Info("sending event")
+		err := c.backend.ProcessEvent(ei.ContainerID, EventStart, ei)
+		if err != nil {
+			c.logger.WithError(err).WithFields(logrus.Fields{
+				"container":  id,
+				"event":      EventStart,
+				"event-info": ei,
+			}).Error("failed to process event")
+		}
+	})
+	logger.Debug("start() completed")
+	return p.pid, nil
+}
+
+// Exec adds a process in a running container
+func (c *client) Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error) {
+	ctr := c.getContainer(containerID)
+	switch {
+	case ctr == nil:
+		return -1, errors.WithStack(newNotFoundError("no such container"))
+	case ctr.hcsContainer == nil:
+		return -1, errors.WithStack(newInvalidParameterError("container is not running"))
+	case ctr.execs != nil && ctr.execs[processID] != nil:
+		return -1, errors.WithStack(newConflictError("id already in use"))
+	}
+	logger := c.logger.WithFields(logrus.Fields{
+		"container": containerID,
+		"exec":      processID,
+	})
+
+	// Note we always tell HCS to
+	// create stdout as it's required regardless of '-i' or '-t' options, so that
+	// docker can always grab the output through logs. We also tell HCS to always
+	// create stdin, even if it's not used - it will be closed shortly. Stderr
+	// is only created if we're not -t.
+	createProcessParms := hcsshim.ProcessConfig{
+		CreateStdInPipe:  true,
+		CreateStdOutPipe: true,
+		CreateStdErrPipe: !spec.Terminal,
+	}
+	if spec.Terminal {
+		createProcessParms.EmulateConsole = true
+		if spec.ConsoleSize != nil {
+			createProcessParms.ConsoleSize[0] = uint(spec.ConsoleSize.Height)
+			createProcessParms.ConsoleSize[1] = uint(spec.ConsoleSize.Width)
+		}
+	}
+
+	// Take working directory from the process to add if it is defined,
+	// otherwise take from the first process.
+	if spec.Cwd != "" {
+		createProcessParms.WorkingDirectory = spec.Cwd
+	} else {
+		createProcessParms.WorkingDirectory = ctr.ociSpec.Process.Cwd
+	}
+
+	// Configure the environment for the process
+	createProcessParms.Environment = setupEnvironmentVariables(spec.Env)
+	if ctr.isWindows {
+		createProcessParms.CommandLine = strings.Join(spec.Args, " ")
+	} else {
+		createProcessParms.CommandArgs = spec.Args
+	}
+	createProcessParms.User = spec.User.Username
+
+	logger.Debugf("exec commandLine: %s", createProcessParms.CommandLine)
+
+	// Start the command running in the container.
+	var (
+		stdout, stderr io.ReadCloser
+		stdin          io.WriteCloser
+	)
+	newProcess, err := ctr.hcsContainer.CreateProcess(&createProcessParms)
+	if err != nil {
+		logger.WithError(err).Errorf("exec's CreateProcess() failed")
+		return -1, err
+	}
+	pid := newProcess.Pid()
+	defer func() {
+		if err != nil {
+			if err := newProcess.Kill(); err != nil {
+				logger.WithError(err).Error("failed to kill process")
+			}
+			go func() {
+				if err := newProcess.Wait(); err != nil {
+					logger.WithError(err).Error("failed to wait for process")
+				}
+				if err := newProcess.Close(); err != nil {
+					logger.WithError(err).Error("failed to clean process resources")
+				}
+			}()
+		}
+	}()
+
+	stdin, stdout, stderr, err = newProcess.Stdio()
+	if err != nil {
+		logger.WithError(err).Error("getting std pipes failed")
+		return -1, err
+	}
+
+	iopipe := &IOPipe{Terminal: spec.Terminal}
+	iopipe.Stdin = createStdInCloser(stdin, newProcess)
+
+	// Convert io.ReadClosers to io.Readers
+	if stdout != nil {
+		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
+	}
+	if stderr != nil {
+		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
+	}
+
+	// Tell the engine to attach streams back to the client
+	_, err = attachStdio(iopipe)
+	if err != nil {
+		return -1, err
+	}
+
+	p := &process{
+		id:         processID,
+		pid:        pid,
+		hcsProcess: newProcess,
+	}
+
+	// Add the process to the container's list of processes
+	ctr.Lock()
+	ctr.execs[processID] = p
+	ctr.Unlock()
+
+	// Spin up a go routine waiting for exit to handle cleanup
+	go c.reapProcess(ctr, p)
+
+	c.eventQ.append(ctr.id, func() {
+		ei := EventInfo{
+			ContainerID: ctr.id,
+			ProcessID:   p.id,
+			Pid:         uint32(p.pid),
+		}
+		c.logger.WithFields(logrus.Fields{
+			"container":  ctr.id,
+			"event":      EventExecAdded,
+			"event-info": ei,
+		}).Info("sending event")
+		err := c.backend.ProcessEvent(ctr.id, EventExecAdded, ei)
+		if err != nil {
+			c.logger.WithError(err).WithFields(logrus.Fields{
+				"container":  ctr.id,
+				"event":      EventExecAdded,
+				"event-info": ei,
+			}).Error("failed to process event")
+		}
+		err = c.backend.ProcessEvent(ctr.id, EventExecStarted, ei)
+		if err != nil {
+			c.logger.WithError(err).WithFields(logrus.Fields{
+				"container":  ctr.id,
+				"event":      EventExecStarted,
+				"event-info": ei,
+			}).Error("failed to process event")
+		}
+	})
+
+	return pid, nil
+}
+
+// Signal handles `docker stop` on Windows. While Linux has support for
+// the full range of signals, signals aren't really implemented on Windows.
+// We fake supporting regular stop and -9 to force kill.
+func (c *client) SignalProcess(_ context.Context, containerID, processID string, signal int) error {
+	ctr, p, err := c.getProcess(containerID, processID)
+	if err != nil {
+		return err
+	}
+
+	ctr.manualStopRequested = true
+
+	logger := c.logger.WithFields(logrus.Fields{
+		"container": containerID,
+		"process":   processID,
+		"pid":       p.pid,
+		"signal":    signal,
+	})
+	logger.Debug("Signal()")
+
+	if processID == InitProcessName {
+		if syscall.Signal(signal) == syscall.SIGKILL {
+			// Terminate the compute system
+			if err := ctr.hcsContainer.Terminate(); err != nil {
+				if !hcsshim.IsPending(err) {
+					logger.WithError(err).Error("failed to terminate hcsshim container")
+				}
+			}
+		} else {
+			// Shut down the container
+			if err := ctr.hcsContainer.Shutdown(); err != nil {
+				if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
+					// ignore errors
+					logger.WithError(err).Error("failed to shutdown hcsshim container")
+				}
+			}
+		}
+	} else {
+		return p.hcsProcess.Kill()
+	}
+
+	return nil
+}
+
+// Resize handles a CLI event to resize an interactive docker run or docker
+// exec window.
+func (c *client) ResizeTerminal(_ context.Context, containerID, processID string, width, height int) error {
+	_, p, err := c.getProcess(containerID, processID)
+	if err != nil {
+		return err
+	}
+
+	c.logger.WithFields(logrus.Fields{
+		"container": containerID,
+		"process":   processID,
+		"height":    height,
+		"width":     width,
+		"pid":       p.pid,
+	}).Debug("resizing")
+	return p.hcsProcess.ResizeConsole(uint16(height), uint16(width))
+}
+
+func (c *client) CloseStdin(_ context.Context, containerID, processID string) error {
+	_, p, err := c.getProcess(containerID, processID)
+	if err != nil {
+		return err
+	}
+
+	return p.hcsProcess.CloseStdin()
+}
+
+// Pause handles pause requests for containers
+func (c *client) Pause(_ context.Context, containerID string) error {
+	ctr, _, err := c.getProcess(containerID, InitProcessName)
+	if err != nil {
+		return err
+	}
+
+	if ctr.ociSpec.Windows.HyperV == nil {
+		return errors.New("cannot pause Windows Server Containers")
+	}
+
+	ctr.Lock()
+	defer ctr.Unlock()
+
+	if err = ctr.hcsContainer.Pause(); err != nil {
+		return err
+	}
+
+	ctr.status = StatusPaused
+
+	c.eventQ.append(containerID, func() {
+		err := c.backend.ProcessEvent(containerID, EventPaused, EventInfo{
+			ContainerID: containerID,
+			ProcessID:   InitProcessName,
+		})
+		c.logger.WithFields(logrus.Fields{
+			"container": ctr.id,
+			"event":     EventPaused,
+		}).Info("sending event")
+		if err != nil {
+			c.logger.WithError(err).WithFields(logrus.Fields{
+				"container": containerID,
+				"event":     EventPaused,
+			}).Error("failed to process event")
+		}
+	})
+
+	return nil
+}
+
+// Resume handles resume requests for containers
+func (c *client) Resume(_ context.Context, containerID string) error {
+	ctr, _, err := c.getProcess(containerID, InitProcessName)
+	if err != nil {
+		return err
+	}
+
+	if ctr.ociSpec.Windows.HyperV == nil {
+		return errors.New("cannot resume Windows Server Containers")
+	}
+
+	ctr.Lock()
+	defer ctr.Unlock()
+
+	if err = ctr.hcsContainer.Resume(); err != nil {
+		return err
+	}
+
+	ctr.status = StatusRunning
+
+	c.eventQ.append(containerID, func() {
+		err := c.backend.ProcessEvent(containerID, EventResumed, EventInfo{
+			ContainerID: containerID,
+			ProcessID:   InitProcessName,
+		})
+		c.logger.WithFields(logrus.Fields{
+			"container": ctr.id,
+			"event":     EventResumed,
+		}).Info("sending event")
+		if err != nil {
+			c.logger.WithError(err).WithFields(logrus.Fields{
+				"container": containerID,
+				"event":     EventResumed,
+			}).Error("failed to process event")
+		}
+	})
+
+	return nil
+}
+
+// Stats handles stats requests for containers
+func (c *client) Stats(_ context.Context, containerID string) (*Stats, error) {
+	ctr, _, err := c.getProcess(containerID, InitProcessName)
+	if err != nil {
+		return nil, err
+	}
+
+	readAt := time.Now()
+	s, err := ctr.hcsContainer.Statistics()
+	if err != nil {
+		return nil, err
+	}
+	return &Stats{
+		Read:     readAt,
+		HCSStats: &s,
+	}, nil
+}
+
+// Restore is the handler for restoring a container
+func (c *client) Restore(ctx context.Context, id string, attachStdio StdioCallback) (bool, int, error) {
+	c.logger.WithField("container", id).Debug("restore()")
+
+	// TODO Windows: On RS1, a re-attach isn't possible.
+	// However, there is a scenario in which there is an issue.
+	// Consider a background container. The daemon dies unexpectedly.
+	// HCS will still have the compute service alive and running.
+	// For consistency, we call in to shoot it regardless of whether HCS knows about it
+	// We explicitly just log a warning if the terminate fails.
+	// Then we tell the backend the container exited.
+	if hc, err := hcsshim.OpenContainer(id); err == nil {
+		const terminateTimeout = time.Minute * 2
+		err := hc.Terminate()
+
+		if hcsshim.IsPending(err) {
+			err = hc.WaitTimeout(terminateTimeout)
+		} else if hcsshim.IsAlreadyStopped(err) {
+			err = nil
+		}
+
+		if err != nil {
+			c.logger.WithField("container", id).WithError(err).Debug("terminate failed on restore")
+			return false, -1, err
+		}
+	}
+	return false, -1, nil
+}
+
+// GetPidsForContainer returns a list of process IDs running in a container.
+// Not used on Windows.
+func (c *client) ListPids(_ context.Context, _ string) ([]uint32, error) {
+	return nil, errors.New("not implemented on Windows")
+}
+
+// Summary returns a summary of the processes running in a container.
+// This is present in Windows to support docker top. In linux, the
+// engine shells out to ps to get process information. On Windows, as
+// the containers could be Hyper-V containers, they would not be
+// visible on the container host. However, libcontainerd does have
+// that information.
+func (c *client) Summary(_ context.Context, containerID string) ([]Summary, error) {
+	ctr, _, err := c.getProcess(containerID, InitProcessName)
+	if err != nil {
+		return nil, err
+	}
+
+	p, err := ctr.hcsContainer.ProcessList()
+	if err != nil {
+		return nil, err
+	}
+
+	pl := make([]Summary, len(p))
+	for i := range p {
+		pl[i] = Summary(p[i])
+	}
+	return pl, nil
+}
+
+func (c *client) DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error) {
+	ec := -1
+	ctr := c.getContainer(containerID)
+	if ctr == nil {
+		return uint32(ec), time.Now(), errors.WithStack(newNotFoundError("no such container"))
+	}
+
+	select {
+	case <-ctx.Done():
+		return uint32(ec), time.Now(), errors.WithStack(ctx.Err())
+	case <-ctr.waitCh:
+	default:
+		return uint32(ec), time.Now(), errors.New("container is not stopped")
+	}
+
+	ctr.Lock()
+	defer ctr.Unlock()
+	return ctr.exitCode, ctr.exitedAt, nil
+}
+
+func (c *client) Delete(_ context.Context, containerID string) error {
+	c.Lock()
+	defer c.Unlock()
+	ctr := c.containers[containerID]
+	if ctr == nil {
+		return errors.WithStack(newNotFoundError("no such container"))
+	}
+
+	ctr.Lock()
+	defer ctr.Unlock()
+
+	switch ctr.status {
+	case StatusCreated:
+		if err := c.shutdownContainer(ctr); err != nil {
+			return err
+		}
+		fallthrough
+	case StatusStopped:
+		delete(c.containers, containerID)
+		return nil
+	}
+
+	return errors.WithStack(newInvalidParameterError("container is not stopped"))
+}
+
+func (c *client) Status(ctx context.Context, containerID string) (Status, error) {
+	c.Lock()
+	defer c.Unlock()
+	ctr := c.containers[containerID]
+	if ctr == nil {
+		return StatusUnknown, errors.WithStack(newNotFoundError("no such container"))
+	}
+
+	ctr.Lock()
+	defer ctr.Unlock()
+	return ctr.status, nil
+}
+
+func (c *client) UpdateResources(ctx context.Context, containerID string, resources *Resources) error {
+	// Updating resource isn't supported on Windows
+	// but we should return nil for enabling updating container
+	return nil
+}
+
+func (c *client) CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error {
+	return errors.New("Windows: Containers do not support checkpoints")
+}
+
+func (c *client) getContainer(id string) *container {
+	c.Lock()
+	ctr := c.containers[id]
+	c.Unlock()
+
+	return ctr
+}
+
+func (c *client) getProcess(containerID, processID string) (*container, *process, error) {
+	ctr := c.getContainer(containerID)
+	switch {
+	case ctr == nil:
+		return nil, nil, errors.WithStack(newNotFoundError("no such container"))
+	case ctr.init == nil:
+		return nil, nil, errors.WithStack(newNotFoundError("container is not running"))
+	case processID == InitProcessName:
+		return ctr, ctr.init, nil
+	default:
+		ctr.Lock()
+		defer ctr.Unlock()
+		if ctr.execs == nil {
+			return nil, nil, errors.WithStack(newNotFoundError("no execs"))
+		}
+	}
+
+	p := ctr.execs[processID]
+	if p == nil {
+		return nil, nil, errors.WithStack(newNotFoundError("no such exec"))
+	}
+
+	return ctr, p, nil
+}
+
+func (c *client) shutdownContainer(ctr *container) error {
+	const shutdownTimeout = time.Minute * 5
+	err := ctr.hcsContainer.Shutdown()
+
+	if hcsshim.IsPending(err) {
+		err = ctr.hcsContainer.WaitTimeout(shutdownTimeout)
+	} else if hcsshim.IsAlreadyStopped(err) {
+		err = nil
+	}
+
+	if err != nil {
+		c.logger.WithError(err).WithField("container", ctr.id).
+			Debug("failed to shutdown container, terminating it")
+		return c.terminateContainer(ctr)
+	}
+
+	return nil
+}
+
+func (c *client) terminateContainer(ctr *container) error {
+	const terminateTimeout = time.Minute * 5
+	err := ctr.hcsContainer.Terminate()
+
+	if hcsshim.IsPending(err) {
+		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
+	} else if hcsshim.IsAlreadyStopped(err) {
+		err = nil
+	}
+
+	if err != nil {
+		c.logger.WithError(err).WithField("container", ctr.id).
+			Debug("failed to terminate container")
+		return err
+	}
+
+	return nil
+}
+
+func (c *client) reapProcess(ctr *container, p *process) int {
+	logger := c.logger.WithFields(logrus.Fields{
+		"container": ctr.id,
+		"process":   p.id,
+	})
+
+	// Block indefinitely for the process to exit.
+	if err := p.hcsProcess.Wait(); err != nil {
+		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
+			logger.WithError(err).Warnf("Wait() failed (container may have been killed)")
+		}
+		// Fall through here, do not return. This ensures we attempt to
+		// continue the shutdown in HCS and tell the docker engine that the
+		// process/container has exited to avoid a container being dropped on
+		// the floor.
+	}
+	exitedAt := time.Now()
+
+	exitCode, err := p.hcsProcess.ExitCode()
+	if err != nil {
+		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
+			logger.WithError(err).Warnf("unable to get exit code for process")
+		}
+		// Since we got an error retrieving the exit code, make sure that the
+		// code we return doesn't incorrectly indicate success.
+		exitCode = -1
+
+		// Fall through here, do not return. This ensures we attempt to
+		// continue the shutdown in HCS and tell the docker engine that the
+		// process/container has exited to avoid a container being dropped on
+		// the floor.
+	}
+
+	if err := p.hcsProcess.Close(); err != nil {
+		logger.WithError(err).Warnf("failed to cleanup hcs process resources")
+	}
+
+	var pendingUpdates bool
+	if p.id == InitProcessName {
+		// Update container status
+		ctr.Lock()
+		ctr.status = StatusStopped
+		ctr.exitedAt = exitedAt
+		ctr.exitCode = uint32(exitCode)
+		close(ctr.waitCh)
+		ctr.Unlock()
+
+		// Handle any servicing
+		if exitCode == 0 && ctr.isWindows && !ctr.ociSpec.Windows.Servicing {
+			pendingUpdates, err = ctr.hcsContainer.HasPendingUpdates()
+			logger.Infof("Pending updates: %v", pendingUpdates)
+			if err != nil {
+				logger.WithError(err).
+					Warnf("failed to check for pending updates (container may have been killed)")
+			}
+		}
+
+		if err := c.shutdownContainer(ctr); err != nil {
+			logger.WithError(err).Warn("failed to shutdown container")
+		} else {
+			logger.Debug("completed container shutdown")
+		}
+
+		if err := ctr.hcsContainer.Close(); err != nil {
+			logger.WithError(err).Error("failed to clean hcs container resources")
+		}
+	}
+
+	if !(ctr.isWindows && ctr.ociSpec.Windows.Servicing) {
+		c.eventQ.append(ctr.id, func() {
+			ei := EventInfo{
+				ContainerID:   ctr.id,
+				ProcessID:     p.id,
+				Pid:           uint32(p.pid),
+				ExitCode:      uint32(exitCode),
+				ExitedAt:      exitedAt,
+				UpdatePending: pendingUpdates,
+			}
+			c.logger.WithFields(logrus.Fields{
+				"container":  ctr.id,
+				"event":      EventExit,
+				"event-info": ei,
+			}).Info("sending event")
+			err := c.backend.ProcessEvent(ctr.id, EventExit, ei)
+			if err != nil {
+				c.logger.WithError(err).WithFields(logrus.Fields{
+					"container":  ctr.id,
+					"event":      EventExit,
+					"event-info": ei,
+				}).Error("failed to process event")
+			}
+			if p.id != InitProcessName {
+				ctr.Lock()
+				delete(ctr.execs, p.id)
+				ctr.Unlock()
+			}
+		})
+	}
+
+	return exitCode
+}
diff --git a/libcontainerd/client_solaris.go b/libcontainerd/client_solaris.go
deleted file mode 100644
index c54cea3..0000000
--- a/libcontainerd/client_solaris.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package libcontainerd
-
-import (
-	containerd "github.com/containerd/containerd/api/grpc/types"
-	"golang.org/x/net/context"
-)
-
-type client struct {
-	clientCommon
-
-	// Platform specific properties below here.
-	remote        *remote
-	q             queue
-	exitNotifiers map[string]*exitNotifier
-	liveRestore   bool
-}
-
-// GetServerVersion returns the connected server version information
-func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) {
-	resp, err := clnt.remote.apiClient.GetServerVersion(ctx, &containerd.GetServerVersionRequest{})
-	if err != nil {
-		return nil, err
-	}
-
-	sv := &ServerVersion{
-		GetServerVersionResponse: *resp,
-	}
-
-	return sv, nil
-}
-
-func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process, attachStdio StdioCallback) (int, error) {
-	return -1, nil
-}
-
-func (clnt *client) SignalProcess(containerID string, pid string, sig int) error {
-	return nil
-}
-
-func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
-	return nil
-}
-
-func (clnt *client) Pause(containerID string) error {
-	return nil
-}
-
-func (clnt *client) Resume(containerID string) error {
-	return nil
-}
-
-func (clnt *client) Stats(containerID string) (*Stats, error) {
-	return nil, nil
-}
-
-func (clnt *client) getExitNotifier(containerID string) *exitNotifier {
-	clnt.mapMutex.RLock()
-	defer clnt.mapMutex.RUnlock()
-	return clnt.exitNotifiers[containerID]
-}
-
-func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier {
-	clnt.mapMutex.Lock()
-	defer clnt.mapMutex.Unlock()
-	w, ok := clnt.exitNotifiers[containerID]
-	if !ok {
-		w = &exitNotifier{c: make(chan struct{}), client: clnt}
-		clnt.exitNotifiers[containerID] = w
-	}
-	return w
-}
-
-// Restore is the handler for restoring a container
-func (clnt *client) Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error {
-	return nil
-}
-
-func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
-	return nil, nil
-}
-
-// Summary returns a summary of the processes running in a container.
-func (clnt *client) Summary(containerID string) ([]Summary, error) {
-	return nil, nil
-}
-
-// UpdateResources updates resources for a running container.
-func (clnt *client) UpdateResources(containerID string, resources Resources) error {
-	// Updating resource isn't supported on Solaris
-	// but we should return nil for enabling updating container
-	return nil
-}
-
-func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error {
-	return nil
-}
-
-func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error {
-	return nil
-}
-
-func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) {
-	return nil, nil
-}
diff --git a/libcontainerd/client_unix.go b/libcontainerd/client_unix.go
deleted file mode 100644
index 202a5b0..0000000
--- a/libcontainerd/client_unix.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// +build linux solaris
-
-package libcontainerd
-
-import (
-	"encoding/json"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-	"sync"
-
-	containerd "github.com/containerd/containerd/api/grpc/types"
-	"github.com/docker/docker/pkg/idtools"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/net/context"
-)
-
-func (clnt *client) prepareBundleDir(uid, gid int) (string, error) {
-	root, err := filepath.Abs(clnt.remote.stateDir)
-	if err != nil {
-		return "", err
-	}
-	if uid == 0 && gid == 0 {
-		return root, nil
-	}
-	p := string(filepath.Separator)
-	for _, d := range strings.Split(root, string(filepath.Separator))[1:] {
-		p = filepath.Join(p, d)
-		fi, err := os.Stat(p)
-		if err != nil && !os.IsNotExist(err) {
-			return "", err
-		}
-		if os.IsNotExist(err) || fi.Mode()&1 == 0 {
-			p = fmt.Sprintf("%s.%d.%d", p, uid, gid)
-			if err := idtools.MkdirAndChown(p, 0700, idtools.IDPair{uid, gid}); err != nil && !os.IsExist(err) {
-				return "", err
-			}
-		}
-	}
-	return p, nil
-}
-
-func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) (err error) {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-
-	if _, err := clnt.getContainer(containerID); err == nil {
-		return fmt.Errorf("Container %s is already active", containerID)
-	}
-
-	uid, gid, err := getRootIDs(spec)
-	if err != nil {
-		return err
-	}
-	dir, err := clnt.prepareBundleDir(uid, gid)
-	if err != nil {
-		return err
-	}
-
-	container := clnt.newContainer(filepath.Join(dir, containerID), options...)
-	if err := container.clean(); err != nil {
-		return err
-	}
-
-	defer func() {
-		if err != nil {
-			container.clean()
-			clnt.deleteContainer(containerID)
-		}
-	}()
-
-	if err := idtools.MkdirAllAndChown(container.dir, 0700, idtools.IDPair{uid, gid}); err != nil && !os.IsExist(err) {
-		return err
-	}
-
-	f, err := os.Create(filepath.Join(container.dir, configFilename))
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-	if err := json.NewEncoder(f).Encode(spec); err != nil {
-		return err
-	}
-	return container.start(&spec, checkpoint, checkpointDir, attachStdio)
-}
-
-func (clnt *client) Signal(containerID string, sig int) error {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	_, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{
-		Id:     containerID,
-		Pid:    InitFriendlyName,
-		Signal: uint32(sig),
-	})
-	return err
-}
-
-func (clnt *client) newContainer(dir string, options ...CreateOption) *container {
-	container := &container{
-		containerCommon: containerCommon{
-			process: process{
-				dir: dir,
-				processCommon: processCommon{
-					containerID:  filepath.Base(dir),
-					client:       clnt,
-					friendlyName: InitFriendlyName,
-				},
-			},
-			processes: make(map[string]*process),
-		},
-	}
-	for _, option := range options {
-		if err := option.Apply(container); err != nil {
-			logrus.Errorf("libcontainerd: newContainer(): %v", err)
-		}
-	}
-	return container
-}
-
-type exitNotifier struct {
-	id     string
-	client *client
-	c      chan struct{}
-	once   sync.Once
-}
-
-func (en *exitNotifier) close() {
-	en.once.Do(func() {
-		close(en.c)
-		en.client.mapMutex.Lock()
-		if en == en.client.exitNotifiers[en.id] {
-			delete(en.client.exitNotifiers, en.id)
-		}
-		en.client.mapMutex.Unlock()
-	})
-}
-func (en *exitNotifier) wait() <-chan struct{} {
-	return en.c
-}
diff --git a/libcontainerd/client_windows.go b/libcontainerd/client_windows.go
deleted file mode 100644
index b869f96..0000000
--- a/libcontainerd/client_windows.go
+++ /dev/null
@@ -1,792 +0,0 @@
-package libcontainerd
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"regexp"
-	"strings"
-	"syscall"
-	"time"
-
-	"golang.org/x/net/context"
-
-	"github.com/Microsoft/hcsshim"
-	opengcs "github.com/Microsoft/opengcs/client"
-	"github.com/docker/docker/pkg/sysinfo"
-	"github.com/docker/docker/pkg/system"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/sirupsen/logrus"
-)
-
-type client struct {
-	clientCommon
-
-	// Platform specific properties below here (none presently on Windows)
-}
-
-// Win32 error codes that are used for various workarounds
-// These really should be ALL_CAPS to match golangs syscall library and standard
-// Win32 error conventions, but golint insists on CamelCase.
-const (
-	CoEClassstring     = syscall.Errno(0x800401F3) // Invalid class string
-	ErrorNoNetwork     = syscall.Errno(1222)       // The network is not present or not started
-	ErrorBadPathname   = syscall.Errno(161)        // The specified path is invalid
-	ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object
-)
-
-// defaultOwner is a tag passed to HCS to allow it to differentiate between
-// container creator management stacks. We hard code "docker" in the case
-// of docker.
-const defaultOwner = "docker"
-
-// Create is the entrypoint to create a container from a spec, and if successfully
-// created, start it too. Table below shows the fields required for HCS JSON calling parameters,
-// where if not populated, is omitted.
-// +-----------------+--------------------------------------------+---------------------------------------------------+
-// |                 | Isolation=Process                          | Isolation=Hyper-V                                 |
-// +-----------------+--------------------------------------------+---------------------------------------------------+
-// | VolumePath      | \\?\\Volume{GUIDa}                         |                                                   |
-// | LayerFolderPath | %root%\windowsfilter\containerID           | %root%\windowsfilter\containerID (servicing only) |
-// | Layers[]        | ID=GUIDb;Path=%root%\windowsfilter\layerID | ID=GUIDb;Path=%root%\windowsfilter\layerID        |
-// | HvRuntime       |                                            | ImagePath=%root%\BaseLayerID\UtilityVM            |
-// +-----------------+--------------------------------------------+---------------------------------------------------+
-//
-// Isolation=Process example:
-//
-// {
-//	"SystemType": "Container",
-//	"Name": "5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
-//	"Owner": "docker",
-//	"VolumePath": "\\\\\\\\?\\\\Volume{66d1ef4c-7a00-11e6-8948-00155ddbef9d}",
-//	"IgnoreFlushesDuringBoot": true,
-//	"LayerFolderPath": "C:\\\\control\\\\windowsfilter\\\\5e0055c814a6005b8e57ac59f9a522066e0af12b48b3c26a9416e23907698776",
-//	"Layers": [{
-//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
-//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
-//	}],
-//	"HostName": "5e0055c814a6",
-//	"MappedDirectories": [],
-//	"HvPartition": false,
-//	"EndpointList": ["eef2649d-bb17-4d53-9937-295a8efe6f2c"],
-//	"Servicing": false
-//}
-//
-// Isolation=Hyper-V example:
-//
-//{
-//	"SystemType": "Container",
-//	"Name": "475c2c58933b72687a88a441e7e0ca4bd72d76413c5f9d5031fee83b98f6045d",
-//	"Owner": "docker",
-//	"IgnoreFlushesDuringBoot": true,
-//	"Layers": [{
-//		"ID": "18955d65-d45a-557b-bf1c-49d6dfefc526",
-//		"Path": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c"
-//	}],
-//	"HostName": "475c2c58933b",
-//	"MappedDirectories": [],
-//	"HvPartition": true,
-//	"EndpointList": ["e1bb1e61-d56f-405e-b75d-fd520cefa0cb"],
-//	"DNSSearchList": "a.com,b.com,c.com",
-//	"HvRuntime": {
-//		"ImagePath": "C:\\\\control\\\\windowsfilter\\\\65bf96e5760a09edf1790cb229e2dfb2dbd0fcdc0bf7451bae099106bfbfea0c\\\\UtilityVM"
-//	},
-//	"Servicing": false
-//}
-func (clnt *client) Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	if b, err := json.Marshal(spec); err == nil {
-		logrus.Debugln("libcontainerd: client.Create() with spec", string(b))
-	}
-
-	// spec.Linux must be nil for Windows containers, but spec.Windows will be filled in regardless of container platform.
-	// This is a temporary workaround due to LCOW requiring layer folder paths, which are stored under spec.Windows.
-	// TODO: @darrenstahlmsft fix this once the OCI spec is updated to support layer folder paths for LCOW
-	if spec.Linux == nil {
-		return clnt.createWindows(containerID, checkpoint, checkpointDir, spec, attachStdio, options...)
-	}
-	return clnt.createLinux(containerID, checkpoint, checkpointDir, spec, attachStdio, options...)
-}
-
-func (clnt *client) createWindows(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error {
-	configuration := &hcsshim.ContainerConfig{
-		SystemType: "Container",
-		Name:       containerID,
-		Owner:      defaultOwner,
-		IgnoreFlushesDuringBoot: spec.Windows.IgnoreFlushesDuringBoot,
-		HostName:                spec.Hostname,
-		HvPartition:             false,
-		Servicing:               spec.Windows.Servicing,
-	}
-
-	if spec.Windows.Resources != nil {
-		if spec.Windows.Resources.CPU != nil {
-			if spec.Windows.Resources.CPU.Count != nil {
-				// This check is being done here rather than in adaptContainerSettings
-				// because we don't want to update the HostConfig in case this container
-				// is moved to a host with more CPUs than this one.
-				cpuCount := *spec.Windows.Resources.CPU.Count
-				hostCPUCount := uint64(sysinfo.NumCPU())
-				if cpuCount > hostCPUCount {
-					logrus.Warnf("Changing requested CPUCount of %d to current number of processors, %d", cpuCount, hostCPUCount)
-					cpuCount = hostCPUCount
-				}
-				configuration.ProcessorCount = uint32(cpuCount)
-			}
-			if spec.Windows.Resources.CPU.Shares != nil {
-				configuration.ProcessorWeight = uint64(*spec.Windows.Resources.CPU.Shares)
-			}
-			if spec.Windows.Resources.CPU.Maximum != nil {
-				configuration.ProcessorMaximum = int64(*spec.Windows.Resources.CPU.Maximum)
-			}
-		}
-		if spec.Windows.Resources.Memory != nil {
-			if spec.Windows.Resources.Memory.Limit != nil {
-				configuration.MemoryMaximumInMB = int64(*spec.Windows.Resources.Memory.Limit) / 1024 / 1024
-			}
-		}
-		if spec.Windows.Resources.Storage != nil {
-			if spec.Windows.Resources.Storage.Bps != nil {
-				configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps
-			}
-			if spec.Windows.Resources.Storage.Iops != nil {
-				configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops
-			}
-		}
-	}
-
-	if spec.Windows.HyperV != nil {
-		configuration.HvPartition = true
-	}
-
-	if spec.Windows.Network != nil {
-		configuration.EndpointList = spec.Windows.Network.EndpointList
-		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
-		if spec.Windows.Network.DNSSearchList != nil {
-			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
-		}
-		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
-	}
-
-	if cs, ok := spec.Windows.CredentialSpec.(string); ok {
-		configuration.Credentials = cs
-	}
-
-	// We must have least two layers in the spec, the bottom one being a base image,
-	// the top one being the RW layer.
-	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) < 2 {
-		return fmt.Errorf("OCI spec is invalid - at least two LayerFolders must be supplied to the runtime")
-	}
-
-	// Strip off the top-most layer as that's passed in separately to HCS
-	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
-	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
-
-	if configuration.HvPartition {
-		// We don't currently support setting the utility VM image explicitly.
-		// TODO @swernli/jhowardmsft circa RS3/4, this may be re-locatable.
-		if spec.Windows.HyperV.UtilityVMPath != "" {
-			return errors.New("runtime does not support an explicit utility VM path for Hyper-V containers")
-		}
-
-		// Find the upper-most utility VM image.
-		var uvmImagePath string
-		for _, path := range layerFolders {
-			fullPath := filepath.Join(path, "UtilityVM")
-			_, err := os.Stat(fullPath)
-			if err == nil {
-				uvmImagePath = fullPath
-				break
-			}
-			if !os.IsNotExist(err) {
-				return err
-			}
-		}
-		if uvmImagePath == "" {
-			return errors.New("utility VM image could not be found")
-		}
-		configuration.HvRuntime = &hcsshim.HvRuntime{ImagePath: uvmImagePath}
-
-		if spec.Root.Path != "" {
-			return errors.New("OCI spec is invalid - Root.Path must be omitted for a Hyper-V container")
-		}
-	} else {
-		const volumeGUIDRegex = `^\\\\\?\\(Volume)\{{0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}\}\\$`
-		if _, err := regexp.MatchString(volumeGUIDRegex, spec.Root.Path); err != nil {
-			return fmt.Errorf(`OCI spec is invalid - Root.Path '%s' must be a volume GUID path in the format '\\?\Volume{GUID}\'`, spec.Root.Path)
-		}
-		// HCS API requires the trailing backslash to be removed
-		configuration.VolumePath = spec.Root.Path[:len(spec.Root.Path)-1]
-	}
-
-	if spec.Root.Readonly {
-		return errors.New(`OCI spec is invalid - Root.Readonly must not be set on Windows`)
-	}
-
-	for _, layerPath := range layerFolders {
-		_, filename := filepath.Split(layerPath)
-		g, err := hcsshim.NameToGuid(filename)
-		if err != nil {
-			return err
-		}
-		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
-			ID:   g.ToString(),
-			Path: layerPath,
-		})
-	}
-
-	// Add the mounts (volumes, bind mounts etc) to the structure
-	var mds []hcsshim.MappedDir
-	var mps []hcsshim.MappedPipe
-	for _, mount := range spec.Mounts {
-		const pipePrefix = `\\.\pipe\`
-		if mount.Type != "" {
-			return fmt.Errorf("OCI spec is invalid - Mount.Type '%s' must not be set", mount.Type)
-		}
-		if strings.HasPrefix(mount.Destination, pipePrefix) {
-			mp := hcsshim.MappedPipe{
-				HostPath:          mount.Source,
-				ContainerPipeName: mount.Destination[len(pipePrefix):],
-			}
-			mps = append(mps, mp)
-		} else {
-			md := hcsshim.MappedDir{
-				HostPath:      mount.Source,
-				ContainerPath: mount.Destination,
-				ReadOnly:      false,
-			}
-			for _, o := range mount.Options {
-				if strings.ToLower(o) == "ro" {
-					md.ReadOnly = true
-				}
-			}
-			mds = append(mds, md)
-		}
-	}
-	configuration.MappedDirectories = mds
-	if len(mps) > 0 && system.GetOSVersion().Build < 16210 { // replace with Win10 RS3 build number at RTM
-		return errors.New("named pipe mounts are not supported on this version of Windows")
-	}
-	configuration.MappedPipes = mps
-
-	hcsContainer, err := hcsshim.CreateContainer(containerID, configuration)
-	if err != nil {
-		return err
-	}
-
-	// Construct a container object for calling start on it.
-	container := &container{
-		containerCommon: containerCommon{
-			process: process{
-				processCommon: processCommon{
-					containerID:  containerID,
-					client:       clnt,
-					friendlyName: InitFriendlyName,
-				},
-			},
-			processes: make(map[string]*process),
-		},
-		isWindows:    true,
-		ociSpec:      spec,
-		hcsContainer: hcsContainer,
-	}
-
-	container.options = options
-	for _, option := range options {
-		if err := option.Apply(container); err != nil {
-			logrus.Errorf("libcontainerd: %v", err)
-		}
-	}
-
-	// Call start, and if it fails, delete the container from our
-	// internal structure, start will keep HCS in sync by deleting the
-	// container there.
-	logrus.Debugf("libcontainerd: createWindows() id=%s, Calling start()", containerID)
-	if err := container.start(attachStdio); err != nil {
-		clnt.deleteContainer(containerID)
-		return err
-	}
-
-	logrus.Debugf("libcontainerd: createWindows() id=%s completed successfully", containerID)
-	return nil
-
-}
-
-func (clnt *client) createLinux(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error {
-	logrus.Debugf("libcontainerd: createLinux(): containerId %s ", containerID)
-
-	var lcowOpt *LCOWOption
-	for _, option := range options {
-		if lcow, ok := option.(*LCOWOption); ok {
-			lcowOpt = lcow
-		}
-	}
-	if lcowOpt == nil || lcowOpt.Config == nil {
-		return fmt.Errorf("lcow option must be supplied to the runtime")
-	}
-
-	configuration := &hcsshim.ContainerConfig{
-		HvPartition:   true,
-		Name:          containerID,
-		SystemType:    "container",
-		ContainerType: "linux",
-		Owner:         defaultOwner,
-		TerminateOnLastHandleClosed: true,
-	}
-
-	if lcowOpt.Config.ActualMode == opengcs.ModeActualVhdx {
-		configuration.HvRuntime = &hcsshim.HvRuntime{
-			ImagePath:          lcowOpt.Config.Vhdx,
-			BootSource:         "Vhd",
-			WritableBootSource: true,
-		}
-	} else {
-		configuration.HvRuntime = &hcsshim.HvRuntime{
-			ImagePath:           lcowOpt.Config.KirdPath,
-			LinuxKernelFile:     lcowOpt.Config.KernelFile,
-			LinuxInitrdFile:     lcowOpt.Config.InitrdFile,
-			LinuxBootParameters: lcowOpt.Config.BootParameters,
-		}
-	}
-
-	if spec.Windows == nil {
-		return fmt.Errorf("spec.Windows must not be nil for LCOW containers")
-	}
-
-	// We must have least one layer in the spec
-	if spec.Windows.LayerFolders == nil || len(spec.Windows.LayerFolders) == 0 {
-		return fmt.Errorf("OCI spec is invalid - at least one LayerFolders must be supplied to the runtime")
-	}
-
-	// Strip off the top-most layer as that's passed in separately to HCS
-	configuration.LayerFolderPath = spec.Windows.LayerFolders[len(spec.Windows.LayerFolders)-1]
-	layerFolders := spec.Windows.LayerFolders[:len(spec.Windows.LayerFolders)-1]
-
-	for _, layerPath := range layerFolders {
-		_, filename := filepath.Split(layerPath)
-		g, err := hcsshim.NameToGuid(filename)
-		if err != nil {
-			return err
-		}
-		configuration.Layers = append(configuration.Layers, hcsshim.Layer{
-			ID:   g.ToString(),
-			Path: filepath.Join(layerPath, "layer.vhd"),
-		})
-	}
-
-	if spec.Windows.Network != nil {
-		configuration.EndpointList = spec.Windows.Network.EndpointList
-		configuration.AllowUnqualifiedDNSQuery = spec.Windows.Network.AllowUnqualifiedDNSQuery
-		if spec.Windows.Network.DNSSearchList != nil {
-			configuration.DNSSearchList = strings.Join(spec.Windows.Network.DNSSearchList, ",")
-		}
-		configuration.NetworkSharedContainerName = spec.Windows.Network.NetworkSharedContainerName
-	}
-
-	hcsContainer, err := hcsshim.CreateContainer(containerID, configuration)
-	if err != nil {
-		return err
-	}
-
-	// Construct a container object for calling start on it.
-	container := &container{
-		containerCommon: containerCommon{
-			process: process{
-				processCommon: processCommon{
-					containerID:  containerID,
-					client:       clnt,
-					friendlyName: InitFriendlyName,
-				},
-			},
-			processes: make(map[string]*process),
-		},
-		ociSpec:      spec,
-		hcsContainer: hcsContainer,
-	}
-
-	container.options = options
-	for _, option := range options {
-		if err := option.Apply(container); err != nil {
-			logrus.Errorf("libcontainerd: createLinux() %v", err)
-		}
-	}
-
-	// Call start, and if it fails, delete the container from our
-	// internal structure, start will keep HCS in sync by deleting the
-	// container there.
-	logrus.Debugf("libcontainerd: createLinux() id=%s, Calling start()", containerID)
-	if err := container.start(attachStdio); err != nil {
-		clnt.deleteContainer(containerID)
-		return err
-	}
-
-	logrus.Debugf("libcontainerd: createLinux() id=%s completed successfully", containerID)
-	return nil
-}
-
-// AddProcess is the handler for adding a process to an already running
-// container. It's called through docker exec. It returns the system pid of the
-// exec'd process.
-func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process, attachStdio StdioCallback) (int, error) {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	container, err := clnt.getContainer(containerID)
-	if err != nil {
-		return -1, err
-	}
-	// Note we always tell HCS to
-	// create stdout as it's required regardless of '-i' or '-t' options, so that
-	// docker can always grab the output through logs. We also tell HCS to always
-	// create stdin, even if it's not used - it will be closed shortly. Stderr
-	// is only created if it we're not -t.
-	createProcessParms := hcsshim.ProcessConfig{
-		CreateStdInPipe:  true,
-		CreateStdOutPipe: true,
-		CreateStdErrPipe: !procToAdd.Terminal,
-	}
-	if procToAdd.Terminal {
-		createProcessParms.EmulateConsole = true
-		if procToAdd.ConsoleSize != nil {
-			createProcessParms.ConsoleSize[0] = uint(procToAdd.ConsoleSize.Height)
-			createProcessParms.ConsoleSize[1] = uint(procToAdd.ConsoleSize.Width)
-		}
-	}
-
-	// Take working directory from the process to add if it is defined,
-	// otherwise take from the first process.
-	if procToAdd.Cwd != "" {
-		createProcessParms.WorkingDirectory = procToAdd.Cwd
-	} else {
-		createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd
-	}
-
-	// Configure the environment for the process
-	createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env)
-	if container.isWindows {
-		createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ")
-	} else {
-		createProcessParms.CommandArgs = procToAdd.Args
-	}
-	createProcessParms.User = procToAdd.User.Username
-
-	logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine)
-
-	// Start the command running in the container.
-	var stdout, stderr io.ReadCloser
-	var stdin io.WriteCloser
-	newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms)
-	if err != nil {
-		logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err)
-		return -1, err
-	}
-
-	pid := newProcess.Pid()
-
-	stdin, stdout, stderr, err = newProcess.Stdio()
-	if err != nil {
-		logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err)
-		return -1, err
-	}
-
-	iopipe := &IOPipe{Terminal: procToAdd.Terminal}
-	iopipe.Stdin = createStdInCloser(stdin, newProcess)
-
-	// Convert io.ReadClosers to io.Readers
-	if stdout != nil {
-		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
-	}
-	if stderr != nil {
-		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
-	}
-
-	proc := &process{
-		processCommon: processCommon{
-			containerID:  containerID,
-			friendlyName: processFriendlyName,
-			client:       clnt,
-			systemPid:    uint32(pid),
-		},
-		hcsProcess: newProcess,
-	}
-
-	// Add the process to the container's list of processes
-	container.processes[processFriendlyName] = proc
-
-	// Tell the engine to attach streams back to the client
-	if err := attachStdio(*iopipe); err != nil {
-		return -1, err
-	}
-
-	// Spin up a go routine waiting for exit to handle cleanup
-	go container.waitExit(proc, false)
-
-	return pid, nil
-}
-
-// Signal handles `docker stop` on Windows. While Linux has support for
-// the full range of signals, signals aren't really implemented on Windows.
-// We fake supporting regular stop and -9 to force kill.
-func (clnt *client) Signal(containerID string, sig int) error {
-	var (
-		cont *container
-		err  error
-	)
-
-	// Get the container as we need it to get the container handle.
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	if cont, err = clnt.getContainer(containerID); err != nil {
-		return err
-	}
-
-	cont.manualStopRequested = true
-
-	logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid)
-
-	if syscall.Signal(sig) == syscall.SIGKILL {
-		// Terminate the compute system
-		if err := cont.hcsContainer.Terminate(); err != nil {
-			if !hcsshim.IsPending(err) {
-				logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err)
-			}
-		}
-	} else {
-		// Shut down the container
-		if err := cont.hcsContainer.Shutdown(); err != nil {
-			if !hcsshim.IsPending(err) && !hcsshim.IsAlreadyStopped(err) {
-				// ignore errors
-				logrus.Warnf("libcontainerd: failed to shutdown container %s: %q", containerID, err)
-			}
-		}
-	}
-
-	return nil
-}
-
-// While Linux has support for the full range of signals, signals aren't really implemented on Windows.
-// We try to terminate the specified process whatever signal is requested.
-func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error {
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	cont, err := clnt.getContainer(containerID)
-	if err != nil {
-		return err
-	}
-
-	for _, p := range cont.processes {
-		if p.friendlyName == processFriendlyName {
-			return p.hcsProcess.Kill()
-		}
-	}
-
-	return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID)
-}
-
-// Resize handles a CLI event to resize an interactive docker run or docker exec
-// window.
-func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error {
-	// Get the libcontainerd container object
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	cont, err := clnt.getContainer(containerID)
-	if err != nil {
-		return err
-	}
-
-	h, w := uint16(height), uint16(width)
-
-	if processFriendlyName == InitFriendlyName {
-		logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid)
-		return cont.process.hcsProcess.ResizeConsole(w, h)
-	}
-
-	for _, p := range cont.processes {
-		if p.friendlyName == processFriendlyName {
-			logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid)
-			return p.hcsProcess.ResizeConsole(w, h)
-		}
-	}
-
-	return fmt.Errorf("Resize could not find containerID %s to resize", containerID)
-
-}
-
-// Pause handles pause requests for containers
-func (clnt *client) Pause(containerID string) error {
-	unlockContainer := true
-	// Get the libcontainerd container object
-	clnt.lock(containerID)
-	defer func() {
-		if unlockContainer {
-			clnt.unlock(containerID)
-		}
-	}()
-	container, err := clnt.getContainer(containerID)
-	if err != nil {
-		return err
-	}
-
-	if container.ociSpec.Windows.HyperV == nil {
-		return errors.New("cannot pause Windows Server Containers")
-	}
-
-	err = container.hcsContainer.Pause()
-	if err != nil {
-		return err
-	}
-
-	// Unlock container before calling back into the daemon
-	unlockContainer = false
-	clnt.unlock(containerID)
-
-	return clnt.backend.StateChanged(containerID, StateInfo{
-		CommonStateInfo: CommonStateInfo{
-			State: StatePause,
-		}})
-}
-
-// Resume handles resume requests for containers
-func (clnt *client) Resume(containerID string) error {
-	unlockContainer := true
-	// Get the libcontainerd container object
-	clnt.lock(containerID)
-	defer func() {
-		if unlockContainer {
-			clnt.unlock(containerID)
-		}
-	}()
-	container, err := clnt.getContainer(containerID)
-	if err != nil {
-		return err
-	}
-
-	// This should never happen, since Windows Server Containers cannot be paused
-
-	if container.ociSpec.Windows.HyperV == nil {
-		return errors.New("cannot resume Windows Server Containers")
-	}
-
-	err = container.hcsContainer.Resume()
-	if err != nil {
-		return err
-	}
-
-	// Unlock container before calling back into the daemon
-	unlockContainer = false
-	clnt.unlock(containerID)
-
-	return clnt.backend.StateChanged(containerID, StateInfo{
-		CommonStateInfo: CommonStateInfo{
-			State: StateResume,
-		}})
-}
-
-// Stats handles stats requests for containers
-func (clnt *client) Stats(containerID string) (*Stats, error) {
-	// Get the libcontainerd container object
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	container, err := clnt.getContainer(containerID)
-	if err != nil {
-		return nil, err
-	}
-	s, err := container.hcsContainer.Statistics()
-	if err != nil {
-		return nil, err
-	}
-	st := Stats(s)
-	return &st, nil
-}
-
-// Restore is the handler for restoring a container
-func (clnt *client) Restore(containerID string, _ StdioCallback, unusedOnWindows ...CreateOption) error {
-	logrus.Debugf("libcontainerd: Restore(%s)", containerID)
-
-	// TODO Windows: On RS1, a re-attach isn't possible.
-	// However, there is a scenario in which there is an issue.
-	// Consider a background container. The daemon dies unexpectedly.
-	// HCS will still have the compute service alive and running.
-	// For consistence, we call in to shoot it regardless if HCS knows about it
-	// We explicitly just log a warning if the terminate fails.
-	// Then we tell the backend the container exited.
-	if hc, err := hcsshim.OpenContainer(containerID); err == nil {
-		const terminateTimeout = time.Minute * 2
-		err := hc.Terminate()
-
-		if hcsshim.IsPending(err) {
-			err = hc.WaitTimeout(terminateTimeout)
-		} else if hcsshim.IsAlreadyStopped(err) {
-			err = nil
-		}
-
-		if err != nil {
-			logrus.Warnf("libcontainerd: failed to terminate %s on restore - %q", containerID, err)
-			return err
-		}
-	}
-	return clnt.backend.StateChanged(containerID, StateInfo{
-		CommonStateInfo: CommonStateInfo{
-			State:    StateExit,
-			ExitCode: 1 << 31,
-		}})
-}
-
-// GetPidsForContainer returns a list of process IDs running in a container.
-// Not used on Windows.
-func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) {
-	return nil, errors.New("not implemented on Windows")
-}
-
-// Summary returns a summary of the processes running in a container.
-// This is present in Windows to support docker top. In linux, the
-// engine shells out to ps to get process information. On Windows, as
-// the containers could be Hyper-V containers, they would not be
-// visible on the container host. However, libcontainerd does have
-// that information.
-func (clnt *client) Summary(containerID string) ([]Summary, error) {
-
-	// Get the libcontainerd container object
-	clnt.lock(containerID)
-	defer clnt.unlock(containerID)
-	container, err := clnt.getContainer(containerID)
-	if err != nil {
-		return nil, err
-	}
-	p, err := container.hcsContainer.ProcessList()
-	if err != nil {
-		return nil, err
-	}
-	pl := make([]Summary, len(p))
-	for i := range p {
-		pl[i] = Summary(p[i])
-	}
-	return pl, nil
-}
-
-// UpdateResources updates resources for a running container.
-func (clnt *client) UpdateResources(containerID string, resources Resources) error {
-	// Updating resource isn't supported on Windows
-	// but we should return nil for enabling updating container
-	return nil
-}
-
-func (clnt *client) CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error {
-	return errors.New("Windows: Containers do not support checkpoints")
-}
-
-func (clnt *client) DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error {
-	return errors.New("Windows: Containers do not support checkpoints")
-}
-
-func (clnt *client) ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error) {
-	return nil, errors.New("Windows: Containers do not support checkpoints")
-}
-
-func (clnt *client) GetServerVersion(ctx context.Context) (*ServerVersion, error) {
-	return &ServerVersion{}, nil
-}
diff --git a/libcontainerd/container.go b/libcontainerd/container.go
deleted file mode 100644
index b403213..0000000
--- a/libcontainerd/container.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package libcontainerd
-
-const (
-	// InitFriendlyName is the name given in the lookup map of processes
-	// for the first process started in a container.
-	InitFriendlyName = "init"
-	configFilename   = "config.json"
-)
-
-type containerCommon struct {
-	process
-	processes map[string]*process
-}
diff --git a/libcontainerd/container_unix.go b/libcontainerd/container_unix.go
deleted file mode 100644
index 9a7dbf0..0000000
--- a/libcontainerd/container_unix.go
+++ /dev/null
@@ -1,246 +0,0 @@
-// +build linux solaris
-
-package libcontainerd
-
-import (
-	"encoding/json"
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"sync"
-	"time"
-
-	containerd "github.com/containerd/containerd/api/grpc/types"
-	"github.com/docker/docker/pkg/ioutils"
-	specs "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/sirupsen/logrus"
-	"github.com/tonistiigi/fifo"
-	"golang.org/x/net/context"
-	"golang.org/x/sys/unix"
-)
-
-type container struct {
-	containerCommon
-
-	// Platform specific fields are below here.
-	pauseMonitor
-	oom         bool
-	runtime     string
-	runtimeArgs []string
-}
-
-type runtime struct {
-	path string
-	args []string
-}
-
-// WithRuntime sets the runtime to be used for the created container
-func WithRuntime(path string, args []string) CreateOption {
-	return runtime{path, args}
-}
-
-func (rt runtime) Apply(p interface{}) error {
-	if pr, ok := p.(*container); ok {
-		pr.runtime = rt.path
-		pr.runtimeArgs = rt.args
-	}
-	return nil
-}
-
-func (ctr *container) clean() error {
-	if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" {
-		return nil
-	}
-	if _, err := os.Lstat(ctr.dir); err != nil {
-		if os.IsNotExist(err) {
-			return nil
-		}
-		return err
-	}
-
-	if err := os.RemoveAll(ctr.dir); err != nil {
-		return err
-	}
-	return nil
-}
-
-// cleanProcess removes the fifos used by an additional process.
-// Caller needs to lock container ID before calling this method.
-func (ctr *container) cleanProcess(id string) {
-	if p, ok := ctr.processes[id]; ok {
-		for _, i := range []int{unix.Stdin, unix.Stdout, unix.Stderr} {
-			if err := os.Remove(p.fifo(i)); err != nil && !os.IsNotExist(err) {
-				logrus.Warnf("libcontainerd: failed to remove %v for process %v: %v", p.fifo(i), id, err)
-			}
-		}
-	}
-	delete(ctr.processes, id)
-}
-
-func (ctr *container) spec() (*specs.Spec, error) {
-	var spec specs.Spec
-	dt, err := ioutil.ReadFile(filepath.Join(ctr.dir, configFilename))
-	if err != nil {
-		return nil, err
-	}
-	if err := json.Unmarshal(dt, &spec); err != nil {
-		return nil, err
-	}
-	return &spec, nil
-}
-
-func (ctr *container) start(spec *specs.Spec, checkpoint, checkpointDir string, attachStdio StdioCallback) (err error) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	ready := make(chan struct{})
-
-	fifoCtx, cancel := context.WithCancel(context.Background())
-	defer func() {
-		if err != nil {
-			cancel()
-		}
-	}()
-
-	iopipe, err := ctr.openFifos(fifoCtx, spec.Process.Terminal)
-	if err != nil {
-		return err
-	}
-
-	var stdinOnce sync.Once
-
-	// we need to delay stdin closure after container start or else "stdin close"
-	// event will be rejected by containerd.
-	// stdin closure happens in attachStdio
-	stdin := iopipe.Stdin
-	iopipe.Stdin = ioutils.NewWriteCloserWrapper(stdin, func() error {
-		var err error
-		stdinOnce.Do(func() { // on error from attach we don't know if stdin was already closed
-			err = stdin.Close()
-			go func() {
-				select {
-				case <-ready:
-				case <-ctx.Done():
-				}
-				select {
-				case <-ready:
-					if err := ctr.sendCloseStdin(); err != nil {
-						logrus.Warnf("failed to close stdin: %+v", err)
-					}
-				default:
-				}
-			}()
-		})
-		return err
-	})
-
-	r := &containerd.CreateContainerRequest{
-		Id:            ctr.containerID,
-		BundlePath:    ctr.dir,
-		Stdin:         ctr.fifo(unix.Stdin),
-		Stdout:        ctr.fifo(unix.Stdout),
-		Stderr:        ctr.fifo(unix.Stderr),
-		Checkpoint:    checkpoint,
-		CheckpointDir: checkpointDir,
-		// check to see if we are running in ramdisk to disable pivot root
-		NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "",
-		Runtime:     ctr.runtime,
-		RuntimeArgs: ctr.runtimeArgs,
-	}
-	ctr.client.appendContainer(ctr)
-
-	if err := attachStdio(*iopipe); err != nil {
-		ctr.closeFifos(iopipe)
-		return err
-	}
-
-	resp, err := ctr.client.remote.apiClient.CreateContainer(context.Background(), r)
-	if err != nil {
-		ctr.closeFifos(iopipe)
-		return err
-	}
-	ctr.systemPid = systemPid(resp.Container)
-	close(ready)
-
-	return ctr.client.backend.StateChanged(ctr.containerID, StateInfo{
-		CommonStateInfo: CommonStateInfo{
-			State: StateStart,
-			Pid:   ctr.systemPid,
-		}})
-
-}
-
-func (ctr *container) newProcess(friendlyName string) *process {
-	return &process{
-		dir: ctr.dir,
-		processCommon: processCommon{
-			containerID:  ctr.containerID,
-			friendlyName: friendlyName,
-			client:       ctr.client,
-		},
-	}
-}
-
-func (ctr *container) handleEvent(e *containerd.Event) error {
-	ctr.client.lock(ctr.containerID)
-	defer ctr.client.unlock(ctr.containerID)
-	switch e.Type {
-	case StateExit, StatePause, StateResume, StateOOM:
-		st := StateInfo{
-			CommonStateInfo: CommonStateInfo{
-				State:    e.Type,
-				ExitCode: e.Status,
-			},
-			OOMKilled: e.Type == StateExit && ctr.oom,
-		}
-		if e.Type == StateOOM {
-			ctr.oom = true
-		}
-		if e.Type == StateExit && e.Pid != InitFriendlyName {
-			st.ProcessID = e.Pid
-			st.State = StateExitProcess
-		}
-
-		// Remove process from list if we have exited
-		switch st.State {
-		case StateExit:
-			ctr.clean()
-			ctr.client.deleteContainer(e.Id)
-		case StateExitProcess:
-			ctr.cleanProcess(st.ProcessID)
-		}
-		ctr.client.q.append(e.Id, func() {
-			if err := ctr.client.backend.StateChanged(e.Id, st); err != nil {
-				logrus.Errorf("libcontainerd: backend.StateChanged(): %v", err)
-			}
-			if e.Type == StatePause || e.Type == StateResume {
-				ctr.pauseMonitor.handle(e.Type)
-			}
-			if e.Type == StateExit {
-				if en := ctr.client.getExitNotifier(e.Id); en != nil {
-					en.close()
-				}
-			}
-		})
-
-	default:
-		logrus.Debugf("libcontainerd: event unhandled: %+v", e)
-	}
-	return nil
-}
-
-// discardFifos attempts to fully read the container fifos to unblock processes
-// that may be blocked on the writer side.
-func (ctr *container) discardFifos() {
-	ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
-	for _, i := range []int{unix.Stdout, unix.Stderr} {
-		f, err := fifo.OpenFifo(ctx, ctr.fifo(i), unix.O_RDONLY|unix.O_NONBLOCK, 0)
-		if err != nil {
-			logrus.Warnf("error opening fifo %v for discarding: %+v", f, err)
-			continue
-		}
-		go func() {
-			io.Copy(ioutil.Discard, f)
-		}()
-	}
-}
diff --git a/libcontainerd/container_windows.go b/libcontainerd/container_windows.go
deleted file mode 100644
index 06f9c82..0000000
--- a/libcontainerd/container_windows.go
+++ /dev/null
@@ -1,335 +0,0 @@
-package libcontainerd
-
-import (
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"strings"
-	"time"
-
-	"github.com/Microsoft/hcsshim"
-	"github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/sys/windows"
-)
-
-type container struct {
-	containerCommon
-
-	// Platform specific fields are below here. There are none presently on Windows.
-	options []CreateOption
-
-	// The ociSpec is required, as client.Create() needs a spec,
-	// but can be called from the RestartManager context which does not
-	// otherwise have access to the Spec
-	ociSpec specs.Spec
-
-	isWindows           bool
-	manualStopRequested bool
-	hcsContainer        hcsshim.Container
-}
-
-func (ctr *container) newProcess(friendlyName string) *process {
-	return &process{
-		processCommon: processCommon{
-			containerID:  ctr.containerID,
-			friendlyName: friendlyName,
-			client:       ctr.client,
-		},
-	}
-}
-
-// start starts a created container.
-// Caller needs to lock container ID before calling this method.
-func (ctr *container) start(attachStdio StdioCallback) error {
-	var err error
-
-	// Start the container.  If this is a servicing container, this call will block
-	// until the container is done with the servicing execution.
-	logrus.Debugln("libcontainerd: starting container ", ctr.containerID)
-	if err = ctr.hcsContainer.Start(); err != nil {
-		logrus.Errorf("libcontainerd: failed to start container: %s", err)
-		if err := ctr.terminate(); err != nil {
-			logrus.Errorf("libcontainerd: failed to cleanup after a failed Start. %s", err)
-		} else {
-			logrus.Debugln("libcontainerd: cleaned up after failed Start by calling Terminate")
-		}
-		return err
-	}
-
-	// Note we always tell HCS to
-	// create stdout as it's required regardless of '-i' or '-t' options, so that
-	// docker can always grab the output through logs. We also tell HCS to always
-	// create stdin, even if it's not used - it will be closed shortly. Stderr
-	// is only created if it we're not -t.
-	var (
-		emulateConsole   bool
-		createStdErrPipe bool
-	)
-	if ctr.ociSpec.Process != nil {
-		emulateConsole = ctr.ociSpec.Process.Terminal
-		createStdErrPipe = !ctr.ociSpec.Process.Terminal && !ctr.ociSpec.Windows.Servicing
-	}
-
-	createProcessParms := &hcsshim.ProcessConfig{
-		EmulateConsole:   emulateConsole,
-		WorkingDirectory: ctr.ociSpec.Process.Cwd,
-		CreateStdInPipe:  !ctr.ociSpec.Windows.Servicing,
-		CreateStdOutPipe: !ctr.ociSpec.Windows.Servicing,
-		CreateStdErrPipe: createStdErrPipe,
-	}
-
-	if ctr.ociSpec.Process != nil && ctr.ociSpec.Process.ConsoleSize != nil {
-		createProcessParms.ConsoleSize[0] = uint(ctr.ociSpec.Process.ConsoleSize.Height)
-		createProcessParms.ConsoleSize[1] = uint(ctr.ociSpec.Process.ConsoleSize.Width)
-	}
-
-	// Configure the environment for the process
-	createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env)
-	if ctr.isWindows {
-		createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ")
-	} else {
-		createProcessParms.CommandArgs = ctr.ociSpec.Process.Args
-	}
-	createProcessParms.User = ctr.ociSpec.Process.User.Username
-
-	// LCOW requires the raw OCI spec passed through HCS and onwards to GCS for the utility VM.
-	if !ctr.isWindows {
-		ociBuf, err := json.Marshal(ctr.ociSpec)
-		if err != nil {
-			return err
-		}
-		ociRaw := json.RawMessage(ociBuf)
-		createProcessParms.OCISpecification = &ociRaw
-	}
-
-	// Start the command running in the container.
-	newProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms)
-	if err != nil {
-		logrus.Errorf("libcontainerd: CreateProcess() failed %s", err)
-		if err := ctr.terminate(); err != nil {
-			logrus.Errorf("libcontainerd: failed to cleanup after a failed CreateProcess. %s", err)
-		} else {
-			logrus.Debugln("libcontainerd: cleaned up after failed CreateProcess by calling Terminate")
-		}
-		return err
-	}
-
-	pid := newProcess.Pid()
-
-	// Save the hcs Process and PID
-	ctr.process.friendlyName = InitFriendlyName
-	ctr.process.hcsProcess = newProcess
-
-	// If this is a servicing container, wait on the process synchronously here and
-	// if it succeeds, wait for it cleanly shutdown and merge into the parent container.
-	if ctr.ociSpec.Windows.Servicing {
-		exitCode := ctr.waitProcessExitCode(&ctr.process)
-
-		if exitCode != 0 {
-			if err := ctr.terminate(); err != nil {
-				logrus.Warnf("libcontainerd: terminating servicing container %s failed: %s", ctr.containerID, err)
-			}
-			return fmt.Errorf("libcontainerd: servicing container %s returned non-zero exit code %d", ctr.containerID, exitCode)
-		}
-
-		return ctr.hcsContainer.WaitTimeout(time.Minute * 5)
-	}
-
-	var stdout, stderr io.ReadCloser
-	var stdin io.WriteCloser
-	stdin, stdout, stderr, err = newProcess.Stdio()
-	if err != nil {
-		logrus.Errorf("libcontainerd: failed to get stdio pipes: %s", err)
-		if err := ctr.terminate(); err != nil {
-			logrus.Errorf("libcontainerd: failed to cleanup after a failed Stdio. %s", err)
-		}
-		return err
-	}
-
-	iopipe := &IOPipe{Terminal: ctr.ociSpec.Process.Terminal}
-
-	iopipe.Stdin = createStdInCloser(stdin, newProcess)
-
-	// Convert io.ReadClosers to io.Readers
-	if stdout != nil {
-		iopipe.Stdout = ioutil.NopCloser(&autoClosingReader{ReadCloser: stdout})
-	}
-	if stderr != nil {
-		iopipe.Stderr = ioutil.NopCloser(&autoClosingReader{ReadCloser: stderr})
-	}
-
-	// Save the PID
-	logrus.Debugf("libcontainerd: process started - PID %d", pid)
-	ctr.systemPid = uint32(pid)
-
-	// Spin up a go routine waiting for exit to handle cleanup
-	go ctr.waitExit(&ctr.process, true)
-
-	ctr.client.appendContainer(ctr)
-
-	if err := attachStdio(*iopipe); err != nil {
-		// OK to return the error here, as waitExit will handle tear-down in HCS
-		return err
-	}
-
-	// Tell the docker engine that the container has started.
-	si := StateInfo{
-		CommonStateInfo: CommonStateInfo{
-			State: StateStart,
-			Pid:   ctr.systemPid, // Not sure this is needed? Double-check monitor.go in daemon BUGBUG @jhowardmsft
-		}}
-	logrus.Debugf("libcontainerd: start() completed OK, %+v", si)
-	return ctr.client.backend.StateChanged(ctr.containerID, si)
-
-}
-
-// waitProcessExitCode will wait for the given process to exit and return its error code.
-func (ctr *container) waitProcessExitCode(process *process) int {
-	// Block indefinitely for the process to exit.
-	err := process.hcsProcess.Wait()
-	if err != nil {
-		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
-			logrus.Warnf("libcontainerd: Wait() failed (container may have been killed): %s", err)
-		}
-		// Fall through here, do not return. This ensures we attempt to continue the
-		// shutdown in HCS and tell the docker engine that the process/container
-		// has exited to avoid a container being dropped on the floor.
-	}
-
-	exitCode, err := process.hcsProcess.ExitCode()
-	if err != nil {
-		if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != windows.ERROR_BROKEN_PIPE {
-			logrus.Warnf("libcontainerd: unable to get exit code from container %s", ctr.containerID)
-		}
-		// Since we got an error retrieving the exit code, make sure that the code we return
-		// doesn't incorrectly indicate success.
-		exitCode = -1
-
-		// Fall through here, do not return. This ensures we attempt to continue the
-		// shutdown in HCS and tell the docker engine that the process/container
-		// has exited to avoid a container being dropped on the floor.
-	}
-
-	return exitCode
-}
-
-// waitExit runs as a goroutine waiting for the process to exit. It's
-// equivalent to (in the linux containerd world) where events come in for
-// state change notifications from containerd.
-func (ctr *container) waitExit(process *process, isFirstProcessToStart bool) error {
-	logrus.Debugln("libcontainerd: waitExit() on pid", process.systemPid)
-
-	exitCode := ctr.waitProcessExitCode(process)
-	// Lock the container while removing the process/container from the list
-	ctr.client.lock(ctr.containerID)
-
-	if !isFirstProcessToStart {
-		ctr.cleanProcess(process.friendlyName)
-	} else {
-		ctr.client.deleteContainer(ctr.containerID)
-	}
-
-	// Unlock here so other threads are unblocked
-	ctr.client.unlock(ctr.containerID)
-
-	// Assume the container has exited
-	si := StateInfo{
-		CommonStateInfo: CommonStateInfo{
-			State:     StateExit,
-			ExitCode:  uint32(exitCode),
-			Pid:       process.systemPid,
-			ProcessID: process.friendlyName,
-		},
-		UpdatePending: false,
-	}
-
-	// But it could have been an exec'd process which exited
-	if !isFirstProcessToStart {
-		si.State = StateExitProcess
-	} else {
-		// Pending updates is only applicable for WCOW
-		if ctr.isWindows {
-			updatePending, err := ctr.hcsContainer.HasPendingUpdates()
-			if err != nil {
-				logrus.Warnf("libcontainerd: HasPendingUpdates() failed (container may have been killed): %s", err)
-			} else {
-				si.UpdatePending = updatePending
-			}
-		}
-
-		logrus.Debugf("libcontainerd: shutting down container %s", ctr.containerID)
-		if err := ctr.shutdown(); err != nil {
-			logrus.Debugf("libcontainerd: failed to shutdown container %s", ctr.containerID)
-		} else {
-			logrus.Debugf("libcontainerd: completed shutting down container %s", ctr.containerID)
-		}
-		if err := ctr.hcsContainer.Close(); err != nil {
-			logrus.Error(err)
-		}
-	}
-
-	if err := process.hcsProcess.Close(); err != nil {
-		logrus.Errorf("libcontainerd: hcsProcess.Close(): %v", err)
-	}
-
-	// Call into the backend to notify it of the state change.
-	logrus.Debugf("libcontainerd: waitExit() calling backend.StateChanged %+v", si)
-	if err := ctr.client.backend.StateChanged(ctr.containerID, si); err != nil {
-		logrus.Error(err)
-	}
-
-	logrus.Debugf("libcontainerd: waitExit() completed OK, %+v", si)
-
-	return nil
-}
-
-// cleanProcess removes process from the map.
-// Caller needs to lock container ID before calling this method.
-func (ctr *container) cleanProcess(id string) {
-	delete(ctr.processes, id)
-}
-
-// shutdown shuts down the container in HCS
-// Caller needs to lock container ID before calling this method.
-func (ctr *container) shutdown() error {
-	const shutdownTimeout = time.Minute * 5
-	err := ctr.hcsContainer.Shutdown()
-	if hcsshim.IsPending(err) {
-		// Explicit timeout to avoid a (remote) possibility that shutdown hangs indefinitely.
-		err = ctr.hcsContainer.WaitTimeout(shutdownTimeout)
-	} else if hcsshim.IsAlreadyStopped(err) {
-		err = nil
-	}
-
-	if err != nil {
-		logrus.Debugf("libcontainerd: error shutting down container %s %v calling terminate", ctr.containerID, err)
-		if err := ctr.terminate(); err != nil {
-			return err
-		}
-		return err
-	}
-
-	return nil
-}
-
-// terminate terminates the container in HCS
-// Caller needs to lock container ID before calling this method.
-func (ctr *container) terminate() error {
-	const terminateTimeout = time.Minute * 5
-	err := ctr.hcsContainer.Terminate()
-
-	if hcsshim.IsPending(err) {
-		err = ctr.hcsContainer.WaitTimeout(terminateTimeout)
-	} else if hcsshim.IsAlreadyStopped(err) {
-		err = nil
-	}
-
-	if err != nil {
-		logrus.Debugf("libcontainerd: error terminating container %s %v", ctr.containerID, err)
-		return err
-	}
-
-	return nil
-}
diff --git a/libcontainerd/errors.go b/libcontainerd/errors.go
new file mode 100644
index 0000000..db59ea8
--- /dev/null
+++ b/libcontainerd/errors.go
@@ -0,0 +1,46 @@
+package libcontainerd
+
+import "errors"
+
+type liberr struct {
+	err error
+}
+
+func (e liberr) Error() string {
+	return e.err.Error()
+}
+
+func (e liberr) Cause() error {
+	return e.err
+}
+
+type notFoundErr struct {
+	liberr
+}
+
+func (notFoundErr) NotFound() {}
+
+func newNotFoundError(err string) error { return notFoundErr{liberr{errors.New(err)}} }
+func wrapNotFoundError(err error) error { return notFoundErr{liberr{err}} }
+
+type invalidParamErr struct {
+	liberr
+}
+
+func (invalidParamErr) InvalidParameter() {}
+
+func newInvalidParameterError(err string) error { return invalidParamErr{liberr{errors.New(err)}} }
+
+type conflictErr struct {
+	liberr
+}
+
+func (conflictErr) ConflictErr() {}
+
+func newConflictError(err string) error { return conflictErr{liberr{errors.New(err)}} }
+
+type sysErr struct {
+	liberr
+}
+
+func wrapSystemError(err error) error { return sysErr{liberr{err}} }
diff --git a/libcontainerd/io.go b/libcontainerd/io.go
new file mode 100644
index 0000000..2c4af58
--- /dev/null
+++ b/libcontainerd/io.go
@@ -0,0 +1,36 @@
+package libcontainerd
+
+import "github.com/containerd/containerd"
+
+// Config returns the containerd.IOConfig of this pipe set
+func (p *IOPipe) Config() containerd.IOConfig {
+	return p.config
+}
+
+// Cancel aborts ongoing operations if they have not completed yet
+func (p *IOPipe) Cancel() {
+	p.cancel()
+}
+
+// Wait waits for io operations to finish
+func (p *IOPipe) Wait() {
+}
+
+// Close closes the underlying pipes
+func (p *IOPipe) Close() error {
+	p.cancel()
+
+	if p.Stdin != nil {
+		p.Stdin.Close()
+	}
+
+	if p.Stdout != nil {
+		p.Stdout.Close()
+	}
+
+	if p.Stderr != nil {
+		p.Stderr.Close()
+	}
+
+	return nil
+}
diff --git a/libcontainerd/io_unix.go b/libcontainerd/io_unix.go
new file mode 100644
index 0000000..0c08b20
--- /dev/null
+++ b/libcontainerd/io_unix.go
@@ -0,0 +1,60 @@
+// +build !windows
+
+package libcontainerd
+
+import (
+	"context"
+	"io"
+	"syscall"
+
+	"github.com/containerd/containerd"
+	"github.com/containerd/fifo"
+	"github.com/pkg/errors"
+)
+
+func newIOPipe(fifos *containerd.FIFOSet) (*IOPipe, error) {
+	var (
+		err         error
+		ctx, cancel = context.WithCancel(context.Background())
+		f           io.ReadWriteCloser
+		iop         = &IOPipe{
+			Terminal: fifos.Terminal,
+			cancel:   cancel,
+			config: containerd.IOConfig{
+				Terminal: fifos.Terminal,
+				Stdin:    fifos.In,
+				Stdout:   fifos.Out,
+				Stderr:   fifos.Err,
+			},
+		}
+	)
+	defer func() {
+		if err != nil {
+			cancel()
+			iop.Close()
+		}
+	}()
+
+	if fifos.In != "" {
+		if f, err = fifo.OpenFifo(ctx, fifos.In, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
+			return nil, errors.WithStack(err)
+		}
+		iop.Stdin = f
+	}
+
+	if fifos.Out != "" {
+		if f, err = fifo.OpenFifo(ctx, fifos.Out, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
+			return nil, errors.WithStack(err)
+		}
+		iop.Stdout = f
+	}
+
+	if fifos.Err != "" {
+		if f, err = fifo.OpenFifo(ctx, fifos.Err, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
+			return nil, errors.WithStack(err)
+		}
+		iop.Stderr = f
+	}
+
+	return iop, nil
+}
diff --git a/libcontainerd/io_windows.go b/libcontainerd/io_windows.go
new file mode 100644
index 0000000..312bdbd
--- /dev/null
+++ b/libcontainerd/io_windows.go
@@ -0,0 +1,138 @@
+package libcontainerd
+
+import (
+	"context"
+	"io"
+	"net"
+	"sync"
+
+	winio "github.com/Microsoft/go-winio"
+	"github.com/containerd/containerd"
+	"github.com/pkg/errors"
+)
+
+type winpipe struct {
+	sync.Mutex
+
+	ctx      context.Context
+	listener net.Listener
+	readyCh  chan struct{}
+	readyErr error
+
+	client net.Conn
+}
+
+func newWinpipe(ctx context.Context, pipe string) (*winpipe, error) {
+	l, err := winio.ListenPipe(pipe, nil)
+	if err != nil {
+		return nil, errors.Wrapf(err, "%q pipe creation failed", pipe)
+	}
+	wp := &winpipe{
+		ctx:      ctx,
+		listener: l,
+		readyCh:  make(chan struct{}),
+	}
+	go func() {
+		go func() {
+			defer close(wp.readyCh)
+			defer wp.listener.Close()
+			c, err := wp.listener.Accept()
+			if err != nil {
+				wp.Lock()
+				if wp.readyErr == nil {
+					wp.readyErr = err
+				}
+				wp.Unlock()
+				return
+			}
+			wp.client = c
+		}()
+
+		select {
+		case <-wp.readyCh:
+		case <-ctx.Done():
+			wp.Lock()
+			if wp.readyErr == nil {
+				wp.listener.Close()
+				wp.readyErr = ctx.Err()
+			}
+			wp.Unlock()
+		}
+	}()
+
+	return wp, nil
+}
+
+func (wp *winpipe) Read(b []byte) (int, error) {
+	select {
+	case <-wp.ctx.Done():
+		return 0, wp.ctx.Err()
+	case <-wp.readyCh:
+		return wp.client.Read(b)
+	}
+}
+
+func (wp *winpipe) Write(b []byte) (int, error) {
+	select {
+	case <-wp.ctx.Done():
+		return 0, wp.ctx.Err()
+	case <-wp.readyCh:
+		return wp.client.Write(b)
+	}
+}
+
+func (wp *winpipe) Close() error {
+	select {
+	case <-wp.readyCh:
+		return wp.client.Close()
+	default:
+		return nil
+	}
+}
+
+func newIOPipe(fifos *containerd.FIFOSet) (*IOPipe, error) {
+	var (
+		err         error
+		ctx, cancel = context.WithCancel(context.Background())
+		p           io.ReadWriteCloser
+		iop         = &IOPipe{
+			Terminal: fifos.Terminal,
+			cancel:   cancel,
+			config: containerd.IOConfig{
+				Terminal: fifos.Terminal,
+				Stdin:    fifos.In,
+				Stdout:   fifos.Out,
+				Stderr:   fifos.Err,
+			},
+		}
+	)
+	defer func() {
+		if err != nil {
+			cancel()
+			iop.Close()
+		}
+	}()
+
+	if fifos.In != "" {
+		if p, err = newWinpipe(ctx, fifos.In); err != nil {
+			return nil, err
+		}
+		iop.Stdin = p
+	}
+
+	if fifos.Out != "" {
+		if p, err = newWinpipe(ctx, fifos.Out); err != nil {
+			return nil, err
+		}
+		iop.Stdout = p
+	}
+
+	if fifos.Err != "" {
+		if p, err = newWinpipe(ctx, fifos.Err); err != nil {
+			return nil, err
+		}
+		iop.Stderr = p
+	}
+
+	return iop, nil
+}
diff --git a/libcontainerd/oom_linux.go b/libcontainerd/oom_linux.go
deleted file mode 100644
index 70f0dac..0000000
--- a/libcontainerd/oom_linux.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package libcontainerd
-
-import (
-	"fmt"
-	"os"
-	"strconv"
-
-	"github.com/opencontainers/runc/libcontainer/system"
-	"github.com/sirupsen/logrus"
-)
-
-func setOOMScore(pid, score int) error {
-	oomScoreAdjPath := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
-	f, err := os.OpenFile(oomScoreAdjPath, os.O_WRONLY, 0)
-	if err != nil {
-		return err
-	}
-	stringScore := strconv.Itoa(score)
-	_, err = f.WriteString(stringScore)
-	f.Close()
-	if os.IsPermission(err) {
-		// Setting oom_score_adj does not work in an
-		// unprivileged container. Ignore the error, but log
-		// it if we appear not to be in that situation.
-		if !system.RunningInUserNS() {
-			logrus.Debugf("Permission denied writing %q to %s", stringScore, oomScoreAdjPath)
-		}
-		return nil
-	}
-	return err
-}
diff --git a/libcontainerd/oom_solaris.go b/libcontainerd/oom_solaris.go
deleted file mode 100644
index 2ebe5e8..0000000
--- a/libcontainerd/oom_solaris.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package libcontainerd
-
-func setOOMScore(pid, score int) error {
-	return nil
-}
diff --git a/libcontainerd/pausemonitor_unix.go b/libcontainerd/pausemonitor_unix.go
deleted file mode 100644
index 4f3766d..0000000
--- a/libcontainerd/pausemonitor_unix.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// +build !windows
-
-package libcontainerd
-
-import (
-	"sync"
-)
-
-// pauseMonitor is helper to get notifications from pause state changes.
-type pauseMonitor struct {
-	sync.Mutex
-	waiters map[string][]chan struct{}
-}
-
-func (m *pauseMonitor) handle(t string) {
-	m.Lock()
-	defer m.Unlock()
-	if m.waiters == nil {
-		return
-	}
-	q, ok := m.waiters[t]
-	if !ok {
-		return
-	}
-	if len(q) > 0 {
-		close(q[0])
-		m.waiters[t] = q[1:]
-	}
-}
-
-func (m *pauseMonitor) append(t string, waiter chan struct{}) {
-	m.Lock()
-	defer m.Unlock()
-	if m.waiters == nil {
-		m.waiters = make(map[string][]chan struct{})
-	}
-	_, ok := m.waiters[t]
-	if !ok {
-		m.waiters[t] = make([]chan struct{}, 0)
-	}
-	m.waiters[t] = append(m.waiters[t], waiter)
-}
diff --git a/libcontainerd/process.go b/libcontainerd/process.go
deleted file mode 100644
index 57562c8..0000000
--- a/libcontainerd/process.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package libcontainerd
-
-// processCommon are the platform common fields as part of the process structure
-// which keeps the state for the main container process, as well as any exec
-// processes.
-type processCommon struct {
-	client *client
-
-	// containerID is the Container ID
-	containerID string
-
-	// friendlyName is an identifier for the process (or `InitFriendlyName`
-	// for the first process)
-	friendlyName string
-
-	// systemPid is the PID of the main container process
-	systemPid uint32
-}
diff --git a/libcontainerd/process_unix.go b/libcontainerd/process_unix.go
deleted file mode 100644
index 3b54e32..0000000
--- a/libcontainerd/process_unix.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// +build linux solaris
-
-package libcontainerd
-
-import (
-	"io"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	goruntime "runtime"
-	"strings"
-
-	containerd "github.com/containerd/containerd/api/grpc/types"
-	"github.com/tonistiigi/fifo"
-	"golang.org/x/net/context"
-	"golang.org/x/sys/unix"
-)
-
-var fdNames = map[int]string{
-	unix.Stdin:  "stdin",
-	unix.Stdout: "stdout",
-	unix.Stderr: "stderr",
-}
-
-// process keeps the state for both main container process and exec process.
-type process struct {
-	processCommon
-
-	// Platform specific fields are below here.
-	dir string
-}
-
-func (p *process) openFifos(ctx context.Context, terminal bool) (pipe *IOPipe, err error) {
-	if err := os.MkdirAll(p.dir, 0700); err != nil {
-		return nil, err
-	}
-
-	io := &IOPipe{}
-
-	io.Stdin, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdin), unix.O_WRONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700)
-	if err != nil {
-		return nil, err
-	}
-
-	defer func() {
-		if err != nil {
-			io.Stdin.Close()
-		}
-	}()
-
-	io.Stdout, err = fifo.OpenFifo(ctx, p.fifo(unix.Stdout), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700)
-	if err != nil {
-		return nil, err
-	}
-
-	defer func() {
-		if err != nil {
-			io.Stdout.Close()
-		}
-	}()
-
-	if goruntime.GOOS == "solaris" || !terminal {
-		// For Solaris terminal handling is done exclusively by the runtime therefore we make no distinction
-		// in the processing for terminal and !terminal cases.
-		io.Stderr, err = fifo.OpenFifo(ctx, p.fifo(unix.Stderr), unix.O_RDONLY|unix.O_CREAT|unix.O_NONBLOCK, 0700)
-		if err != nil {
-			return nil, err
-		}
-		defer func() {
-			if err != nil {
-				io.Stderr.Close()
-			}
-		}()
-	} else {
-		io.Stderr = ioutil.NopCloser(emptyReader{})
-	}
-
-	return io, nil
-}
-
-func (p *process) sendCloseStdin() error {
-	_, err := p.client.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{
-		Id:         p.containerID,
-		Pid:        p.friendlyName,
-		CloseStdin: true,
-	})
-	if err != nil && (strings.Contains(err.Error(), "container not found") || strings.Contains(err.Error(), "process not found")) {
-		return nil
-	}
-	return err
-}
-
-func (p *process) closeFifos(io *IOPipe) {
-	io.Stdin.Close()
-	io.Stdout.Close()
-	io.Stderr.Close()
-}
-
-type emptyReader struct{}
-
-func (r emptyReader) Read(b []byte) (int, error) {
-	return 0, io.EOF
-}
-
-func (p *process) fifo(index int) string {
-	return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index])
-}
diff --git a/libcontainerd/process_windows.go b/libcontainerd/process_windows.go
index 854c4dd..76b92a6 100644
--- a/libcontainerd/process_windows.go
+++ b/libcontainerd/process_windows.go
@@ -8,14 +8,6 @@
 	"github.com/docker/docker/pkg/ioutils"
 )
 
-// process keeps the state for both main container process and exec process.
-type process struct {
-	processCommon
-
-	// Platform specific fields are below here.
-	hcsProcess hcsshim.Process
-}
-
 type autoClosingReader struct {
 	io.ReadCloser
 	sync.Once
@@ -23,7 +15,7 @@
 
 func (r *autoClosingReader) Read(b []byte) (n int, err error) {
 	n, err = r.ReadCloser.Read(b)
-	if err == io.EOF {
+	if err != nil {
 		r.Once.Do(func() { r.ReadCloser.Close() })
 	}
 	return
@@ -46,3 +38,7 @@
 		return nil
 	})
 }
+
+func (p *process) Cleanup() error {
+	return nil
+}
diff --git a/libcontainerd/queue.go b/libcontainerd/queue.go
new file mode 100644
index 0000000..38d74a0
--- /dev/null
+++ b/libcontainerd/queue.go
@@ -0,0 +1,35 @@
+package libcontainerd
+
+import "sync"
+
+type queue struct {
+	sync.Mutex
+	fns map[string]chan struct{}
+}
+
+func (q *queue) append(id string, f func()) {
+	q.Lock()
+	defer q.Unlock()
+
+	if q.fns == nil {
+		q.fns = make(map[string]chan struct{})
+	}
+
+	done := make(chan struct{})
+
+	fn, ok := q.fns[id]
+	q.fns[id] = done
+	go func() {
+		if ok {
+			<-fn
+		}
+		f()
+		close(done)
+
+		q.Lock()
+		if q.fns[id] == done {
+			delete(q.fns, id)
+		}
+		q.Unlock()
+	}()
+}
diff --git a/libcontainerd/queue_test.go b/libcontainerd/queue_test.go
new file mode 100644
index 0000000..902f48a
--- /dev/null
+++ b/libcontainerd/queue_test.go
@@ -0,0 +1,31 @@
+package libcontainerd
+
+import (
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestSerialization(t *testing.T) {
+	var (
+		q             queue
+		serialization = 1
+	)
+
+	q.append("aaa", func() {
+		// simulate a long-running task
+		time.Sleep(10 * time.Millisecond)
+		require.EqualValues(t, serialization, 1)
+		serialization = 2
+	})
+	q.append("aaa", func() {
+		require.EqualValues(t, serialization, 2)
+		serialization = 3
+	})
+	q.append("aaa", func() {
+		require.EqualValues(t, serialization, 3)
+		serialization = 4
+	})
+	time.Sleep(20 * time.Millisecond)
+}
diff --git a/libcontainerd/queue_unix.go b/libcontainerd/queue_unix.go
deleted file mode 100644
index 66765f7..0000000
--- a/libcontainerd/queue_unix.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build linux solaris
-
-package libcontainerd
-
-import "sync"
-
-type queue struct {
-	sync.Mutex
-	fns map[string]chan struct{}
-}
-
-func (q *queue) append(id string, f func()) {
-	q.Lock()
-	defer q.Unlock()
-
-	if q.fns == nil {
-		q.fns = make(map[string]chan struct{})
-	}
-
-	done := make(chan struct{})
-
-	fn, ok := q.fns[id]
-	q.fns[id] = done
-	go func() {
-		if ok {
-			<-fn
-		}
-		f()
-		close(done)
-
-		q.Lock()
-		if q.fns[id] == done {
-			delete(q.fns, id)
-		}
-		q.Unlock()
-	}()
-}
diff --git a/libcontainerd/queue_unix_test.go b/libcontainerd/queue_unix_test.go
deleted file mode 100644
index bb49a5d..0000000
--- a/libcontainerd/queue_unix_test.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// +build linux solaris
-
-package libcontainerd
-
-import (
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/require"
-)
-
-func TestSerialization(t *testing.T) {
-	var (
-		q             queue
-		serialization = 1
-	)
-
-	q.append("aaa", func() {
-		//simulate a long time task
-		time.Sleep(10 * time.Millisecond)
-		require.EqualValues(t, serialization, 1)
-		serialization = 2
-	})
-	q.append("aaa", func() {
-		require.EqualValues(t, serialization, 2)
-		serialization = 3
-	})
-	q.append("aaa", func() {
-		require.EqualValues(t, serialization, 3)
-		serialization = 4
-	})
-	time.Sleep(20 * time.Millisecond)
-}
diff --git a/libcontainerd/remote.go b/libcontainerd/remote.go
deleted file mode 100644
index 9031e3a..0000000
--- a/libcontainerd/remote.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package libcontainerd
-
-// Remote on Linux defines the accesspoint to the containerd grpc API.
-// Remote on Windows is largely an unimplemented interface as there is
-// no remote containerd.
-type Remote interface {
-	// Client returns a new Client instance connected with given Backend.
-	Client(Backend) (Client, error)
-	// Cleanup stops containerd if it was started by libcontainerd.
-	// Note this is not used on Windows as there is no remote containerd.
-	Cleanup()
-	// UpdateOptions allows various remote options to be updated at runtime.
-	UpdateOptions(...RemoteOption) error
-}
-
-// RemoteOption allows to configure parameters of remotes.
-// This is unused on Windows.
-type RemoteOption interface {
-	Apply(Remote) error
-}
diff --git a/libcontainerd/remote_daemon.go b/libcontainerd/remote_daemon.go
new file mode 100644
index 0000000..e6fd05f
--- /dev/null
+++ b/libcontainerd/remote_daemon.go
@@ -0,0 +1,317 @@
+// +build !windows
+
+package libcontainerd
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/BurntSushi/toml"
+	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/server"
+	"github.com/docker/docker/pkg/system"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	maxConnectionRetryCount = 3
+	healthCheckTimeout      = 3 * time.Second
+	shutdownTimeout         = 15 * time.Second
+	configFile              = "containerd.toml"
+	binaryName              = "docker-containerd"
+	pidFile                 = "docker-containerd.pid"
+)
+
+type pluginConfigs struct {
+	Plugins map[string]interface{} `toml:"plugins"`
+}
+
+type remote struct {
+	sync.RWMutex
+	server.Config
+
+	daemonPid int
+	logger    *logrus.Entry
+
+	daemonWaitCh    chan struct{}
+	clients         []*client
+	shutdownContext context.Context
+	shutdownCancel  context.CancelFunc
+	shutdown        bool
+
+	// Options
+	startDaemon bool
+	rootDir     string
+	stateDir    string
+	snapshotter string
+	pluginConfs pluginConfigs
+}
+
+// New creates a fresh instance of libcontainerd remote.
+func New(rootDir, stateDir string, options ...RemoteOption) (rem Remote, err error) {
+	defer func() {
+		if err != nil {
+			err = errors.Wrap(err, "Failed to connect to containerd")
+		}
+	}()
+
+	r := &remote{
+		rootDir:  rootDir,
+		stateDir: stateDir,
+		Config: server.Config{
+			Root:  filepath.Join(rootDir, "daemon"),
+			State: filepath.Join(stateDir, "daemon"),
+		},
+		pluginConfs: pluginConfigs{make(map[string]interface{})},
+		daemonPid:   -1,
+		logger:      logrus.WithField("module", "libcontainerd"),
+	}
+	r.shutdownContext, r.shutdownCancel = context.WithCancel(context.Background())
+
+	rem = r
+	for _, option := range options {
+		if err = option.Apply(r); err != nil {
+			return
+		}
+	}
+	r.setDefaults()
+
+	if err = system.MkdirAll(stateDir, 0700, ""); err != nil {
+		return
+	}
+
+	if r.startDaemon {
+		os.Remove(r.GRPC.Address)
+		if err = r.startContainerd(); err != nil {
+			return
+		}
+		defer func() {
+			if err != nil {
+				r.Cleanup()
+			}
+		}()
+	}
+
+	// This connection is just used to monitor the connection
+	client, err := containerd.New(r.GRPC.Address)
+	if err != nil {
+		return
+	}
+	if _, err := client.Version(context.Background()); err != nil {
+		system.KillProcess(r.daemonPid)
+		return nil, errors.Wrapf(err, "unable to get containerd version")
+	}
+
+	go r.monitorConnection(client)
+
+	return r, nil
+}
+
+func (r *remote) NewClient(ns string, b Backend) (Client, error) {
+	c := &client{
+		stateDir:   r.stateDir,
+		logger:     r.logger.WithField("namespace", ns),
+		namespace:  ns,
+		backend:    b,
+		containers: make(map[string]*container),
+	}
+
+	rclient, err := containerd.New(r.GRPC.Address, containerd.WithDefaultNamespace(ns))
+	if err != nil {
+		return nil, err
+	}
+	c.remote = rclient
+
+	go c.processEventStream(r.shutdownContext)
+
+	r.Lock()
+	r.clients = append(r.clients, c)
+	r.Unlock()
+	return c, nil
+}
+
+func (r *remote) Cleanup() {
+	if r.daemonPid != -1 {
+		r.shutdownCancel()
+		r.stopDaemon()
+	}
+
+	// cleanup some files
+	os.Remove(filepath.Join(r.stateDir, pidFile))
+
+	r.platformCleanup()
+}
+
+func (r *remote) getContainerdPid() (int, error) {
+	pidFile := filepath.Join(r.stateDir, pidFile)
+	f, err := os.OpenFile(pidFile, os.O_RDWR, 0600)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return -1, nil
+		}
+		return -1, err
+	}
+	defer f.Close()
+
+	b := make([]byte, 8)
+	n, err := f.Read(b)
+	if err != nil && err != io.EOF {
+		return -1, err
+	}
+
+	if n > 0 {
+		pid, err := strconv.ParseUint(string(b[:n]), 10, 64)
+		if err != nil {
+			return -1, err
+		}
+		if system.IsProcessAlive(int(pid)) {
+			return int(pid), nil
+		}
+	}
+
+	return -1, nil
+}
+
+func (r *remote) getContainerdConfig() (string, error) {
+	path := filepath.Join(r.stateDir, configFile)
+	f, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0600)
+	if err != nil {
+		return "", errors.Wrapf(err, "failed to open containerd config file at %s", path)
+	}
+	defer f.Close()
+
+	enc := toml.NewEncoder(f)
+	if err = enc.Encode(r.Config); err != nil {
+		return "", errors.Wrapf(err, "failed to encode general config")
+	}
+	if err = enc.Encode(r.pluginConfs); err != nil {
+		return "", errors.Wrapf(err, "failed to encode plugin configs")
+	}
+
+	return path, nil
+}
+
+func (r *remote) startContainerd() error {
+	pid, err := r.getContainerdPid()
+	if err != nil {
+		return err
+	}
+
+	if pid != -1 {
+		r.daemonPid = pid
+		logrus.WithField("pid", pid).
+			Infof("libcontainerd: %s is still running", binaryName)
+		return nil
+	}
+
+	configFile, err := r.getContainerdConfig()
+	if err != nil {
+		return err
+	}
+
+	args := []string{"--config", configFile}
+	cmd := exec.Command(binaryName, args...)
+	// redirect containerd logs to docker logs
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+	cmd.SysProcAttr = containerdSysProcAttr()
+	// clear the NOTIFY_SOCKET from the env when starting containerd
+	cmd.Env = nil
+	for _, e := range os.Environ() {
+		if !strings.HasPrefix(e, "NOTIFY_SOCKET") {
+			cmd.Env = append(cmd.Env, e)
+		}
+	}
+	if err := cmd.Start(); err != nil {
+		return err
+	}
+
+	r.daemonWaitCh = make(chan struct{})
+	go func() {
+		// Reap our child when needed
+		if err := cmd.Wait(); err != nil {
+			r.logger.WithError(err).Errorf("containerd did not exit successfully")
+		}
+		close(r.daemonWaitCh)
+	}()
+
+	r.daemonPid = cmd.Process.Pid
+
+	err = ioutil.WriteFile(filepath.Join(r.stateDir, pidFile), []byte(fmt.Sprintf("%d", r.daemonPid)), 0660)
+	if err != nil {
+		system.KillProcess(r.daemonPid)
+		return errors.Wrap(err, "libcontainerd: failed to save daemon pid to disk")
+	}
+
+	logrus.WithField("pid", r.daemonPid).
+		Infof("libcontainerd: started new %s process", binaryName)
+
+	return nil
+}
+
+func (r *remote) monitorConnection(client *containerd.Client) {
+	var transientFailureCount = 0
+
+	ticker := time.NewTicker(500 * time.Millisecond)
+	defer ticker.Stop()
+
+	for {
+		<-ticker.C
+		ctx, cancel := context.WithTimeout(r.shutdownContext, healthCheckTimeout)
+		_, err := client.IsServing(ctx)
+		cancel()
+		if err == nil {
+			transientFailureCount = 0
+			continue
+		}
+
+		select {
+		case <-r.shutdownContext.Done():
+			r.logger.Info("stopping healthcheck following graceful shutdown")
+			client.Close()
+			return
+		default:
+		}
+
+		r.logger.WithError(err).WithField("binary", binaryName).Debug("daemon is not responding")
+
+		if r.daemonPid != -1 {
+			transientFailureCount++
+			if transientFailureCount >= maxConnectionRetryCount || !system.IsProcessAlive(r.daemonPid) {
+				transientFailureCount = 0
+				if system.IsProcessAlive(r.daemonPid) {
+					r.logger.WithField("pid", r.daemonPid).Info("killing and restarting containerd")
+					// Try to get a stack trace
+					syscall.Kill(r.daemonPid, syscall.SIGUSR1)
+					<-time.After(100 * time.Millisecond)
+					system.KillProcess(r.daemonPid)
+				}
+				<-r.daemonWaitCh
+				var err error
+				client.Close()
+				os.Remove(r.GRPC.Address)
+				if err = r.startContainerd(); err != nil {
+					r.logger.WithError(err).Error("failed restarting containerd")
+				} else {
+					newClient, err := containerd.New(r.GRPC.Address)
+					if err != nil {
+						r.logger.WithError(err).Error("failed to connect to containerd")
+					} else {
+						client = newClient
+					}
+				}
+			}
+		}
+	}
+}
diff --git a/libcontainerd/remote_daemon_linux.go b/libcontainerd/remote_daemon_linux.go
new file mode 100644
index 0000000..e99a4fd
--- /dev/null
+++ b/libcontainerd/remote_daemon_linux.go
@@ -0,0 +1,54 @@
+package libcontainerd
+
+import (
+	"os"
+	"path/filepath"
+	"syscall"
+	"time"
+
+	"github.com/docker/docker/pkg/system"
+)
+
+const (
+	sockFile      = "docker-containerd.sock"
+	debugSockFile = "docker-containerd-debug.sock"
+)
+
+func (r *remote) setDefaults() {
+	if r.GRPC.Address == "" {
+		r.GRPC.Address = filepath.Join(r.stateDir, sockFile)
+	}
+	if r.Debug.Address == "" {
+		r.Debug.Address = filepath.Join(r.stateDir, debugSockFile)
+	}
+	if r.Debug.Level == "" {
+		r.Debug.Level = "info"
+	}
+	if r.OOMScore == 0 {
+		r.OOMScore = -999
+	}
+	if r.snapshotter == "" {
+		r.snapshotter = "overlay"
+	}
+}
+
+func (r *remote) stopDaemon() {
+	// Ask the daemon to quit
+	syscall.Kill(r.daemonPid, syscall.SIGTERM)
+	// Wait up to 15secs for it to stop
+	for i := time.Duration(0); i < shutdownTimeout; i += time.Second {
+		if !system.IsProcessAlive(r.daemonPid) {
+			break
+		}
+		time.Sleep(time.Second)
+	}
+
+	if system.IsProcessAlive(r.daemonPid) {
+		r.logger.WithField("pid", r.daemonPid).Warn("daemon didn't stop within 15 secs, killing it")
+		syscall.Kill(r.daemonPid, syscall.SIGKILL)
+	}
+}
+
+func (r *remote) platformCleanup() {
+	os.Remove(filepath.Join(r.stateDir, sockFile))
+}
diff --git a/libcontainerd/remote_daemon_options.go b/libcontainerd/remote_daemon_options.go
new file mode 100644
index 0000000..b167f64
--- /dev/null
+++ b/libcontainerd/remote_daemon_options.go
@@ -0,0 +1,141 @@
+// +build !windows
+
+package libcontainerd
+
+import "fmt"
+
+// WithRemoteAddr sets the external containerd socket to connect to.
+func WithRemoteAddr(addr string) RemoteOption {
+	return rpcAddr(addr)
+}
+
+type rpcAddr string
+
+func (a rpcAddr) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.GRPC.Address = string(a)
+		return nil
+	}
+	return fmt.Errorf("WithRemoteAddr option not supported for this remote")
+}
+
+// WithRemoteAddrUser sets the uid and gid to create the RPC address with
+func WithRemoteAddrUser(uid, gid int) RemoteOption {
+	return rpcUser{uid, gid}
+}
+
+type rpcUser struct {
+	uid int
+	gid int
+}
+
+func (u rpcUser) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.GRPC.Uid = u.uid
+		remote.GRPC.Gid = u.gid
+		return nil
+	}
+	return fmt.Errorf("WithRemoteAddrUser option not supported for this remote")
+}
+
+// WithStartDaemon defines if libcontainerd should also run containerd daemon.
+func WithStartDaemon(start bool) RemoteOption {
+	return startDaemon(start)
+}
+
+type startDaemon bool
+
+func (s startDaemon) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.startDaemon = bool(s)
+		return nil
+	}
+	return fmt.Errorf("WithStartDaemon option not supported for this remote")
+}
+
+// WithLogLevel defines which log level to start containerd with.
+// This only makes sense if WithStartDaemon() was set to true.
+func WithLogLevel(lvl string) RemoteOption {
+	return logLevel(lvl)
+}
+
+type logLevel string
+
+func (l logLevel) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.Debug.Level = string(l)
+		return nil
+	}
+	return fmt.Errorf("WithLogLevel option not supported for this remote")
+}
+
+// WithDebugAddress defines at which location the debug GRPC connection
+// should be made
+func WithDebugAddress(addr string) RemoteOption {
+	return debugAddress(addr)
+}
+
+type debugAddress string
+
+func (d debugAddress) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.Debug.Address = string(d)
+		return nil
+	}
+	return fmt.Errorf("WithDebugAddress option not supported for this remote")
+}
+
+// WithMetricsAddress defines at which location the metrics GRPC connection
+// should be made
+func WithMetricsAddress(addr string) RemoteOption {
+	return metricsAddress(addr)
+}
+
+type metricsAddress string
+
+func (m metricsAddress) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.Metrics.Address = string(m)
+		return nil
+	}
+	return fmt.Errorf("WithMetricsAddress option not supported for this remote")
+}
+
+// WithSnapshotter defines which snapshotter driver should be used
+func WithSnapshotter(name string) RemoteOption {
+	return snapshotter(name)
+}
+
+type snapshotter string
+
+func (s snapshotter) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.snapshotter = string(s)
+		return nil
+	}
+	return fmt.Errorf("WithSnapshotter option not supported for this remote")
+}
+
+// WithPlugin allow configuring a containerd plugin
+// configuration values passed needs to be quoted if quotes are needed in
+// the toml format.
+func WithPlugin(name string, conf interface{}) RemoteOption {
+	return pluginConf{
+		name: name,
+		conf: conf,
+	}
+}
+
+type pluginConf struct {
+	// Name is the name of the plugin
+	name string
+	conf interface{}
+}
+
+func (p pluginConf) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.pluginConfs.Plugins[p.name] = p.conf
+		return nil
+	}
+	return fmt.Errorf("WithPlugin option not supported for this remote")
+}
diff --git a/libcontainerd/remote_daemon_options_linux.go b/libcontainerd/remote_daemon_options_linux.go
new file mode 100644
index 0000000..1e5a981
--- /dev/null
+++ b/libcontainerd/remote_daemon_options_linux.go
@@ -0,0 +1,34 @@
+package libcontainerd
+
+import "fmt"
+
+// WithOOMScore defines the oom_score_adj to set for the containerd process.
+func WithOOMScore(score int) RemoteOption {
+	return oomScore(score)
+}
+
+type oomScore int
+
+func (o oomScore) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.OOMScore = int(o)
+		return nil
+	}
+	return fmt.Errorf("WithOOMScore option not supported for this remote")
+}
+
+// WithSubreaper sets whether containerd should register itself as a
+// subreaper
+func WithSubreaper(reap bool) RemoteOption {
+	return subreaper(reap)
+}
+
+type subreaper bool
+
+func (s subreaper) Apply(r Remote) error {
+	if remote, ok := r.(*remote); ok {
+		remote.Subreaper = bool(s)
+		return nil
+	}
+	return fmt.Errorf("WithSubreaper option not supported for this remote")
+}
diff --git a/libcontainerd/remote_daemon_process.go b/libcontainerd/remote_daemon_process.go
new file mode 100644
index 0000000..a00406e
--- /dev/null
+++ b/libcontainerd/remote_daemon_process.go
@@ -0,0 +1,56 @@
+// +build !windows
+
+package libcontainerd
+
+import "github.com/pkg/errors"
+
+// process represents the state for the main container process or an exec.
+type process struct {
+	// id is the logical name of the process
+	id string
+
+	// cid is the container id to which this process belongs
+	cid string
+
+	// pid is the identifier of the process
+	pid uint32
+
+	// io holds the io reader/writer associated with the process
+	io *IOPipe
+
+	// root is the state directory for the process
+	root string
+}
+
+func (p *process) ID() string {
+	return p.id
+}
+
+func (p *process) Pid() uint32 {
+	return p.pid
+}
+
+func (p *process) SetPid(pid uint32) error {
+	if p.pid != 0 {
+		return errors.Errorf("pid is already set to %d", pid)
+	}
+
+	p.pid = pid
+	return nil
+}
+
+func (p *process) IOPipe() *IOPipe {
+	return p.io
+}
+
+func (p *process) CloseIO() {
+	if p.io.Stdin != nil {
+		p.io.Stdin.Close()
+	}
+	if p.io.Stdout != nil {
+		p.io.Stdout.Close()
+	}
+	if p.io.Stderr != nil {
+		p.io.Stderr.Close()
+	}
+}
diff --git a/libcontainerd/remote_daemon_process_linux.go b/libcontainerd/remote_daemon_process_linux.go
new file mode 100644
index 0000000..fd54d01
--- /dev/null
+++ b/libcontainerd/remote_daemon_process_linux.go
@@ -0,0 +1,59 @@
+package libcontainerd
+
+import (
+	"os"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+var fdNames = map[int]string{
+	unix.Stdin:  "stdin",
+	unix.Stdout: "stdout",
+	unix.Stderr: "stderr",
+}
+
+func (p *process) pipeName(index int) string {
+	return filepath.Join(p.root, p.id+"-"+fdNames[index])
+}
+
+func (p *process) IOPaths() (string, string, string) {
+	var (
+		stdin  = p.pipeName(unix.Stdin)
+		stdout = p.pipeName(unix.Stdout)
+		stderr = p.pipeName(unix.Stderr)
+	)
+	// TODO: debug why we're having zombies when I don't unset those
+	if p.io.Stdin == nil {
+		stdin = ""
+	}
+	if p.io.Stderr == nil {
+		stderr = ""
+	}
+	return stdin, stdout, stderr
+}
+
+func (p *process) Cleanup() error {
+	var retErr error
+
+	// Ensure everything was closed
+	p.CloseIO()
+
+	for _, i := range [3]string{
+		p.pipeName(unix.Stdin),
+		p.pipeName(unix.Stdout),
+		p.pipeName(unix.Stderr),
+	} {
+		err := os.Remove(i)
+		if err != nil {
+			if retErr == nil {
+				retErr = errors.Wrapf(err, "failed to remove %s", i)
+			} else {
+				retErr = errors.Wrapf(retErr, "failed to remove %s", i)
+			}
+		}
+	}
+
+	return retErr
+}
diff --git a/libcontainerd/remote_daemon_windows.go b/libcontainerd/remote_daemon_windows.go
new file mode 100644
index 0000000..44b5fc0
--- /dev/null
+++ b/libcontainerd/remote_daemon_windows.go
@@ -0,0 +1,50 @@
+// +build remote_daemon
+
+package libcontainerd
+
+import (
+	"os"
+)
+
+const (
+	grpcPipeName  = `\\.\pipe\docker-containerd-containerd`
+	debugPipeName = `\\.\pipe\docker-containerd-debug`
+)
+
+func (r *remote) setDefaults() {
+	if r.GRPC.Address == "" {
+		r.GRPC.Address = grpcPipeName
+	}
+	if r.Debug.Address == "" {
+		r.Debug.Address = debugPipeName
+	}
+	if r.Debug.Level == "" {
+		r.Debug.Level = "info"
+	}
+	if r.snapshotter == "" {
+		r.snapshotter = "naive" // TODO(mlaventure): switch to "windows" once implemented
+	}
+}
+
+func (r *remote) stopDaemon() {
+	p, err := os.FindProcess(r.daemonPid)
+	if err != nil {
+		r.logger.WithField("pid", r.daemonPid).Warn("could not find daemon process")
+		return
+	}
+
+	if err = p.Kill(); err != nil {
+		r.logger.WithError(err).WithField("pid", r.daemonPid).Warn("could not kill daemon process")
+		return
+	}
+
+	_, err = p.Wait()
+	if err != nil {
+		r.logger.WithError(err).WithField("pid", r.daemonPid).Warn("wait for daemon process")
+		return
+	}
+}
+
+func (r *remote) platformCleanup() {
+	// Nothing to do
+}
diff --git a/libcontainerd/remote_local.go b/libcontainerd/remote_local.go
new file mode 100644
index 0000000..ad3be03
--- /dev/null
+++ b/libcontainerd/remote_local.go
@@ -0,0 +1,59 @@
+// +build windows
+
+package libcontainerd
+
+import (
+	"sync"
+
+	"github.com/sirupsen/logrus"
+)
+
+type remote struct {
+	sync.RWMutex
+
+	logger  *logrus.Entry
+	clients []*client
+
+	// Options
+	rootDir  string
+	stateDir string
+}
+
+// New creates a fresh instance of libcontainerd remote.
+func New(rootDir, stateDir string, options ...RemoteOption) (Remote, error) {
+	return &remote{
+		logger:   logrus.WithField("module", "libcontainerd"),
+		rootDir:  rootDir,
+		stateDir: stateDir,
+	}, nil
+}
+
+type client struct {
+	sync.Mutex
+
+	rootDir    string
+	stateDir   string
+	backend    Backend
+	logger     *logrus.Entry
+	eventQ     queue
+	containers map[string]*container
+}
+
+func (r *remote) NewClient(ns string, b Backend) (Client, error) {
+	c := &client{
+		rootDir:    r.rootDir,
+		stateDir:   r.stateDir,
+		backend:    b,
+		logger:     r.logger.WithField("namespace", ns),
+		containers: make(map[string]*container),
+	}
+	r.Lock()
+	r.clients = append(r.clients, c)
+	r.Unlock()
+
+	return c, nil
+}
+
+func (r *remote) Cleanup() {
+	// Nothing to do
+}
diff --git a/libcontainerd/remote_unix.go b/libcontainerd/remote_unix.go
deleted file mode 100644
index 7bab53e..0000000
--- a/libcontainerd/remote_unix.go
+++ /dev/null
@@ -1,565 +0,0 @@
-// +build linux solaris
-
-package libcontainerd
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"net"
-	"os"
-	"os/exec"
-	"path/filepath"
-	goruntime "runtime"
-	"strconv"
-	"strings"
-	"sync"
-	"time"
-
-	containerd "github.com/containerd/containerd/api/grpc/types"
-	"github.com/docker/docker/pkg/locker"
-	"github.com/docker/docker/pkg/system"
-	"github.com/golang/protobuf/ptypes"
-	"github.com/golang/protobuf/ptypes/timestamp"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/net/context"
-	"golang.org/x/sys/unix"
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/grpclog"
-	"google.golang.org/grpc/health/grpc_health_v1"
-	"google.golang.org/grpc/transport"
-)
-
-const (
-	maxConnectionRetryCount      = 3
-	containerdHealthCheckTimeout = 3 * time.Second
-	containerdShutdownTimeout    = 15 * time.Second
-	containerdBinary             = "docker-containerd"
-	containerdPidFilename        = "docker-containerd.pid"
-	containerdSockFilename       = "docker-containerd.sock"
-	containerdStateDir           = "containerd"
-	eventTimestampFilename       = "event.ts"
-)
-
-type remote struct {
-	sync.RWMutex
-	apiClient            containerd.APIClient
-	daemonPid            int
-	stateDir             string
-	rpcAddr              string
-	startDaemon          bool
-	closedManually       bool
-	debugLog             bool
-	rpcConn              *grpc.ClientConn
-	clients              []*client
-	eventTsPath          string
-	runtime              string
-	runtimeArgs          []string
-	daemonWaitCh         chan struct{}
-	liveRestore          bool
-	oomScore             int
-	restoreFromTimestamp *timestamp.Timestamp
-}
-
-// New creates a fresh instance of libcontainerd remote.
-func New(stateDir string, options ...RemoteOption) (_ Remote, err error) {
-	defer func() {
-		if err != nil {
-			err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specified the correct address. Got error: %v", err)
-		}
-	}()
-	r := &remote{
-		stateDir:    stateDir,
-		daemonPid:   -1,
-		eventTsPath: filepath.Join(stateDir, eventTimestampFilename),
-	}
-	for _, option := range options {
-		if err := option.Apply(r); err != nil {
-			return nil, err
-		}
-	}
-
-	if err := system.MkdirAll(stateDir, 0700, ""); err != nil {
-		return nil, err
-	}
-
-	if r.rpcAddr == "" {
-		r.rpcAddr = filepath.Join(stateDir, containerdSockFilename)
-	}
-
-	if r.startDaemon {
-		if err := r.runContainerdDaemon(); err != nil {
-			return nil, err
-		}
-	}
-
-	// don't output the grpc reconnect logging
-	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
-	dialOpts := []grpc.DialOption{
-		grpc.WithInsecure(),
-		grpc.WithBackoffMaxDelay(2 * time.Second),
-		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
-			return net.DialTimeout("unix", addr, timeout)
-		}),
-	}
-	conn, err := grpc.Dial(r.rpcAddr, dialOpts...)
-	if err != nil {
-		return nil, fmt.Errorf("error connecting to containerd: %v", err)
-	}
-
-	r.rpcConn = conn
-	r.apiClient = containerd.NewAPIClient(conn)
-
-	// Get the timestamp to restore from
-	t := r.getLastEventTimestamp()
-	tsp, err := ptypes.TimestampProto(t)
-	if err != nil {
-		logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err)
-	}
-	r.restoreFromTimestamp = tsp
-
-	go r.handleConnectionChange()
-
-	if err := r.startEventsMonitor(); err != nil {
-		return nil, err
-	}
-
-	return r, nil
-}
-
-func (r *remote) UpdateOptions(options ...RemoteOption) error {
-	for _, option := range options {
-		if err := option.Apply(r); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (r *remote) handleConnectionChange() {
-	var transientFailureCount = 0
-
-	ticker := time.NewTicker(500 * time.Millisecond)
-	defer ticker.Stop()
-	healthClient := grpc_health_v1.NewHealthClient(r.rpcConn)
-
-	for {
-		<-ticker.C
-		ctx, cancel := context.WithTimeout(context.Background(), containerdHealthCheckTimeout)
-		_, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
-		cancel()
-		if err == nil {
-			continue
-		}
-
-		logrus.Debugf("libcontainerd: containerd health check returned error: %v", err)
-
-		if r.daemonPid != -1 {
-			if r.closedManually {
-				// Well, we asked for it to stop, just return
-				return
-			}
-			// all other errors are transient
-			// Reset state to be notified of next failure
-			transientFailureCount++
-			if transientFailureCount >= maxConnectionRetryCount {
-				transientFailureCount = 0
-				if system.IsProcessAlive(r.daemonPid) {
-					system.KillProcess(r.daemonPid)
-				}
-				<-r.daemonWaitCh
-				if err := r.runContainerdDaemon(); err != nil { //FIXME: Handle error
-					logrus.Errorf("libcontainerd: error restarting containerd: %v", err)
-				}
-				continue
-			}
-		}
-	}
-}
-
-func (r *remote) Cleanup() {
-	if r.daemonPid == -1 {
-		return
-	}
-	r.closedManually = true
-	r.rpcConn.Close()
-	// Ask the daemon to quit
-	unix.Kill(r.daemonPid, unix.SIGTERM)
-
-	// Wait up to 15secs for it to stop
-	for i := time.Duration(0); i < containerdShutdownTimeout; i += time.Second {
-		if !system.IsProcessAlive(r.daemonPid) {
-			break
-		}
-		time.Sleep(time.Second)
-	}
-
-	if system.IsProcessAlive(r.daemonPid) {
-		logrus.Warnf("libcontainerd: containerd (%d) didn't stop within 15 secs, killing it\n", r.daemonPid)
-		unix.Kill(r.daemonPid, unix.SIGKILL)
-	}
-
-	// cleanup some files
-	os.Remove(filepath.Join(r.stateDir, containerdPidFilename))
-	os.Remove(filepath.Join(r.stateDir, containerdSockFilename))
-}
-
-func (r *remote) Client(b Backend) (Client, error) {
-	c := &client{
-		clientCommon: clientCommon{
-			backend:    b,
-			containers: make(map[string]*container),
-			locker:     locker.New(),
-		},
-		remote:        r,
-		exitNotifiers: make(map[string]*exitNotifier),
-		liveRestore:   r.liveRestore,
-	}
-
-	r.Lock()
-	r.clients = append(r.clients, c)
-	r.Unlock()
-	return c, nil
-}
-
-func (r *remote) updateEventTimestamp(t time.Time) {
-	f, err := os.OpenFile(r.eventTsPath, unix.O_CREAT|unix.O_WRONLY|unix.O_TRUNC, 0600)
-	if err != nil {
-		logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err)
-		return
-	}
-	defer f.Close()
-
-	b, err := t.MarshalText()
-	if err != nil {
-		logrus.Warnf("libcontainerd: failed to encode timestamp: %v", err)
-		return
-	}
-
-	n, err := f.Write(b)
-	if err != nil || n != len(b) {
-		logrus.Warnf("libcontainerd: failed to update event timestamp file: %v", err)
-		f.Truncate(0)
-		return
-	}
-}
-
-func (r *remote) getLastEventTimestamp() time.Time {
-	t := time.Now()
-
-	fi, err := os.Stat(r.eventTsPath)
-	if os.IsNotExist(err) || fi.Size() == 0 {
-		return t
-	}
-
-	f, err := os.Open(r.eventTsPath)
-	if err != nil {
-		logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err)
-		return t
-	}
-	defer f.Close()
-
-	b := make([]byte, fi.Size())
-	n, err := f.Read(b)
-	if err != nil || n != len(b) {
-		logrus.Warnf("libcontainerd: Unable to read last event ts: %v", err)
-		return t
-	}
-
-	t.UnmarshalText(b)
-
-	return t
-}
-
-func (r *remote) startEventsMonitor() error {
-	// First, get past events
-	t := r.getLastEventTimestamp()
-	tsp, err := ptypes.TimestampProto(t)
-	if err != nil {
-		logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err)
-	}
-	er := &containerd.EventsRequest{
-		Timestamp: tsp,
-	}
-
-	var events containerd.API_EventsClient
-	for {
-		events, err = r.apiClient.Events(context.Background(), er, grpc.FailFast(false))
-		if err == nil {
-			break
-		}
-		logrus.Warnf("libcontainerd: failed to get events from containerd: %q", err)
-
-		if r.closedManually {
-			// ignore error if grpc remote connection is closed manually
-			return nil
-		}
-
-		<-time.After(100 * time.Millisecond)
-	}
-
-	go r.handleEventStream(events)
-	return nil
-}
-
-func (r *remote) handleEventStream(events containerd.API_EventsClient) {
-	for {
-		e, err := events.Recv()
-		if err != nil {
-			if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc &&
-				r.closedManually {
-				// ignore error if grpc remote connection is closed manually
-				return
-			}
-			logrus.Errorf("libcontainerd: failed to receive event from containerd: %v", err)
-			go r.startEventsMonitor()
-			return
-		}
-
-		logrus.Debugf("libcontainerd: received containerd event: %#v", e)
-
-		var container *container
-		var c *client
-		r.RLock()
-		for _, c = range r.clients {
-			container, err = c.getContainer(e.Id)
-			if err == nil {
-				break
-			}
-		}
-		r.RUnlock()
-		if container == nil {
-			logrus.Warnf("libcontainerd: unknown container %s", e.Id)
-			continue
-		}
-
-		if err := container.handleEvent(e); err != nil {
-			logrus.Errorf("libcontainerd: error processing state change for %s: %v", e.Id, err)
-		}
-
-		tsp, err := ptypes.Timestamp(e.Timestamp)
-		if err != nil {
-			logrus.Errorf("libcontainerd: failed to convert event timestamp: %q", err)
-			continue
-		}
-
-		r.updateEventTimestamp(tsp)
-	}
-}
-
-func (r *remote) runContainerdDaemon() error {
-	pidFilename := filepath.Join(r.stateDir, containerdPidFilename)
-	f, err := os.OpenFile(pidFilename, os.O_RDWR|os.O_CREATE, 0600)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	// File exist, check if the daemon is alive
-	b := make([]byte, 8)
-	n, err := f.Read(b)
-	if err != nil && err != io.EOF {
-		return err
-	}
-
-	if n > 0 {
-		pid, err := strconv.ParseUint(string(b[:n]), 10, 64)
-		if err != nil {
-			return err
-		}
-		if system.IsProcessAlive(int(pid)) {
-			logrus.Infof("libcontainerd: previous instance of containerd still alive (%d)", pid)
-			r.daemonPid = int(pid)
-			return nil
-		}
-	}
-
-	// rewind the file
-	_, err = f.Seek(0, os.SEEK_SET)
-	if err != nil {
-		return err
-	}
-
-	// Truncate it
-	err = f.Truncate(0)
-	if err != nil {
-		return err
-	}
-
-	// Start a new instance
-	args := []string{
-		"-l", fmt.Sprintf("unix://%s", r.rpcAddr),
-		"--metrics-interval=0",
-		"--start-timeout", "2m",
-		"--state-dir", filepath.Join(r.stateDir, containerdStateDir),
-	}
-	if goruntime.GOOS == "solaris" {
-		args = append(args, "--shim", "containerd-shim", "--runtime", "runc")
-	} else {
-		args = append(args, "--shim", "docker-containerd-shim")
-		if r.runtime != "" {
-			args = append(args, "--runtime")
-			args = append(args, r.runtime)
-		}
-	}
-	if r.debugLog {
-		args = append(args, "--debug")
-	}
-	if len(r.runtimeArgs) > 0 {
-		for _, v := range r.runtimeArgs {
-			args = append(args, "--runtime-args")
-			args = append(args, v)
-		}
-		logrus.Debugf("libcontainerd: runContainerdDaemon: runtimeArgs: %s", args)
-	}
-
-	cmd := exec.Command(containerdBinary, args...)
-	// redirect containerd logs to docker logs
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-	cmd.SysProcAttr = setSysProcAttr(true)
-	cmd.Env = nil
-	// clear the NOTIFY_SOCKET from the env when starting containerd
-	for _, e := range os.Environ() {
-		if !strings.HasPrefix(e, "NOTIFY_SOCKET") {
-			cmd.Env = append(cmd.Env, e)
-		}
-	}
-	if err := cmd.Start(); err != nil {
-		return err
-	}
-
-	// unless strictly necessary, do not add anything in between here
-	// as the reaper goroutine below needs to kick in as soon as possible
-	// and any "return" from code paths added here will defeat the reaper
-	// process.
-
-	r.daemonWaitCh = make(chan struct{})
-	go func() {
-		cmd.Wait()
-		close(r.daemonWaitCh)
-	}() // Reap our child when needed
-
-	logrus.Infof("libcontainerd: new containerd process, pid: %d", cmd.Process.Pid)
-	if err := setOOMScore(cmd.Process.Pid, r.oomScore); err != nil {
-		system.KillProcess(cmd.Process.Pid)
-		return err
-	}
-	if _, err := f.WriteString(fmt.Sprintf("%d", cmd.Process.Pid)); err != nil {
-		system.KillProcess(cmd.Process.Pid)
-		return err
-	}
-
-	r.daemonPid = cmd.Process.Pid
-	return nil
-}
-
-// WithRemoteAddr sets the external containerd socket to connect to.
-func WithRemoteAddr(addr string) RemoteOption {
-	return rpcAddr(addr)
-}
-
-type rpcAddr string
-
-func (a rpcAddr) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.rpcAddr = string(a)
-		return nil
-	}
-	return fmt.Errorf("WithRemoteAddr option not supported for this remote")
-}
-
-// WithRuntimePath sets the path of the runtime to be used as the
-// default by containerd
-func WithRuntimePath(rt string) RemoteOption {
-	return runtimePath(rt)
-}
-
-type runtimePath string
-
-func (rt runtimePath) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.runtime = string(rt)
-		return nil
-	}
-	return fmt.Errorf("WithRuntime option not supported for this remote")
-}
-
-// WithRuntimeArgs sets the list of runtime args passed to containerd
-func WithRuntimeArgs(args []string) RemoteOption {
-	return runtimeArgs(args)
-}
-
-type runtimeArgs []string
-
-func (rt runtimeArgs) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.runtimeArgs = rt
-		return nil
-	}
-	return fmt.Errorf("WithRuntimeArgs option not supported for this remote")
-}
-
-// WithStartDaemon defines if libcontainerd should also run containerd daemon.
-func WithStartDaemon(start bool) RemoteOption {
-	return startDaemon(start)
-}
-
-type startDaemon bool
-
-func (s startDaemon) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.startDaemon = bool(s)
-		return nil
-	}
-	return fmt.Errorf("WithStartDaemon option not supported for this remote")
-}
-
-// WithDebugLog defines if containerd debug logs will be enabled for daemon.
-func WithDebugLog(debug bool) RemoteOption {
-	return debugLog(debug)
-}
-
-type debugLog bool
-
-func (d debugLog) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.debugLog = bool(d)
-		return nil
-	}
-	return fmt.Errorf("WithDebugLog option not supported for this remote")
-}
-
-// WithLiveRestore defines if containers are stopped on shutdown or restored.
-func WithLiveRestore(v bool) RemoteOption {
-	return liveRestore(v)
-}
-
-type liveRestore bool
-
-func (l liveRestore) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.liveRestore = bool(l)
-		for _, c := range remote.clients {
-			c.liveRestore = bool(l)
-		}
-		return nil
-	}
-	return fmt.Errorf("WithLiveRestore option not supported for this remote")
-}
-
-// WithOOMScore defines the oom_score_adj to set for the containerd process.
-func WithOOMScore(score int) RemoteOption {
-	return oomScore(score)
-}
-
-type oomScore int
-
-func (o oomScore) Apply(r Remote) error {
-	if remote, ok := r.(*remote); ok {
-		remote.oomScore = int(o)
-		return nil
-	}
-	return fmt.Errorf("WithOOMScore option not supported for this remote")
-}
diff --git a/libcontainerd/remote_windows.go b/libcontainerd/remote_windows.go
deleted file mode 100644
index 74c1044..0000000
--- a/libcontainerd/remote_windows.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package libcontainerd
-
-import "github.com/docker/docker/pkg/locker"
-
-type remote struct {
-}
-
-func (r *remote) Client(b Backend) (Client, error) {
-	c := &client{
-		clientCommon: clientCommon{
-			backend:    b,
-			containers: make(map[string]*container),
-			locker:     locker.New(),
-		},
-	}
-	return c, nil
-}
-
-// Cleanup is a no-op on Windows. It is here to implement the interface.
-func (r *remote) Cleanup() {
-}
-
-func (r *remote) UpdateOptions(opts ...RemoteOption) error {
-	return nil
-}
-
-// New creates a fresh instance of libcontainerd remote. On Windows,
-// this is not used as there is no remote containerd process.
-func New(_ string, _ ...RemoteOption) (Remote, error) {
-	return &remote{}, nil
-}
-
-// WithLiveRestore is a noop on windows.
-func WithLiveRestore(v bool) RemoteOption {
-	return nil
-}
diff --git a/libcontainerd/types.go b/libcontainerd/types.go
index c7ade6b..9eede43 100644
--- a/libcontainerd/types.go
+++ b/libcontainerd/types.go
@@ -1,64 +1,112 @@
 package libcontainerd
 
 import (
+	"context"
 	"io"
+	"time"
 
-	containerd "github.com/containerd/containerd/api/grpc/types"
+	"github.com/containerd/containerd"
 	"github.com/opencontainers/runtime-spec/specs-go"
-	"golang.org/x/net/context"
 )
 
-// State constants used in state change reporting.
+// EventType represents a possible event from libcontainerd
+type EventType string
+
+// Event constants used when reporting events
 const (
-	StateStart       = "start-container"
-	StatePause       = "pause"
-	StateResume      = "resume"
-	StateExit        = "exit"
-	StateRestore     = "restore"
-	StateExitProcess = "exit-process"
-	StateOOM         = "oom" // fake state
+	EventUnknown     EventType = "unknown"
+	EventExit        EventType = "exit"
+	EventOOM         EventType = "oom"
+	EventCreate      EventType = "create"
+	EventStart       EventType = "start"
+	EventExecAdded   EventType = "exec-added"
+	EventExecStarted EventType = "exec-started"
+	EventPaused      EventType = "paused"
+	EventResumed     EventType = "resumed"
 )
 
-// CommonStateInfo contains the state info common to all platforms.
-type CommonStateInfo struct { // FIXME: event?
-	State     string
-	Pid       uint32
-	ExitCode  uint32
-	ProcessID string
+// Status represents the current status of a container
+type Status string
+
+// Possible container statuses
+const (
+	// Running indicates the process is currently executing
+	StatusRunning Status = "running"
+	// Created indicates the process has been created within containerd but the
+	// user's defined process has not started
+	StatusCreated Status = "created"
+	// Stopped indicates that the process has ran and exited
+	StatusStopped Status = "stopped"
+	// Paused indicates that the process is currently paused
+	StatusPaused Status = "paused"
+	// Pausing indicates that the process is currently switching from a
+	// running state into a paused state
+	StatusPausing Status = "pausing"
+	// Unknown indicates that we could not determine the status from the runtime
+	StatusUnknown Status = "unknown"
+)
+
+// Remote on Linux defines the access point to the containerd grpc API.
+// Remote on Windows is largely an unimplemented interface as there is
+// no remote containerd.
+type Remote interface {
+	// NewClient returns a new Client instance connected with given Backend.
+	NewClient(namespace string, backend Backend) (Client, error)
+	// Cleanup stops containerd if it was started by libcontainerd.
+	// Note this is not used on Windows as there is no remote containerd.
+	Cleanup()
+}
+
+// RemoteOption allows to configure parameters of remotes.
+// This is unused on Windows.
+type RemoteOption interface {
+	Apply(Remote) error
+}
+
+// EventInfo contains the event info
+type EventInfo struct {
+	ContainerID string
+	ProcessID   string
+	Pid         uint32
+	ExitCode    uint32
+	ExitedAt    time.Time
+	OOMKilled   bool
+	// Windows Only field
+	UpdatePending bool
 }
 
 // Backend defines callbacks that the client of the library needs to implement.
 type Backend interface {
-	StateChanged(containerID string, state StateInfo) error
+	ProcessEvent(containerID string, event EventType, ei EventInfo) error
 }
 
 // Client provides access to containerd features.
 type Client interface {
-	GetServerVersion(ctx context.Context) (*ServerVersion, error)
-	Create(containerID string, checkpoint string, checkpointDir string, spec specs.Spec, attachStdio StdioCallback, options ...CreateOption) error
-	Signal(containerID string, sig int) error
-	SignalProcess(containerID string, processFriendlyName string, sig int) error
-	AddProcess(ctx context.Context, containerID, processFriendlyName string, process Process, attachStdio StdioCallback) (int, error)
-	Resize(containerID, processFriendlyName string, width, height int) error
-	Pause(containerID string) error
-	Resume(containerID string) error
-	Restore(containerID string, attachStdio StdioCallback, options ...CreateOption) error
-	Stats(containerID string) (*Stats, error)
-	GetPidsForContainer(containerID string) ([]int, error)
-	Summary(containerID string) ([]Summary, error)
-	UpdateResources(containerID string, resources Resources) error
-	CreateCheckpoint(containerID string, checkpointID string, checkpointDir string, exit bool) error
-	DeleteCheckpoint(containerID string, checkpointID string, checkpointDir string) error
-	ListCheckpoints(containerID string, checkpointDir string) (*Checkpoints, error)
-}
+	Version(ctx context.Context) (containerd.Version, error)
 
-// CreateOption allows to configure parameters of container creation.
-type CreateOption interface {
-	Apply(interface{}) error
+	Restore(ctx context.Context, containerID string, attachStdio StdioCallback) (alive bool, pid int, err error)
+
+	Create(ctx context.Context, containerID string, spec *specs.Spec, runtimeOptions interface{}) error
+	Start(ctx context.Context, containerID, checkpointDir string, withStdin bool, attachStdio StdioCallback) (pid int, err error)
+	SignalProcess(ctx context.Context, containerID, processID string, signal int) error
+	Exec(ctx context.Context, containerID, processID string, spec *specs.Process, withStdin bool, attachStdio StdioCallback) (int, error)
+	ResizeTerminal(ctx context.Context, containerID, processID string, width, height int) error
+	CloseStdin(ctx context.Context, containerID, processID string) error
+	Pause(ctx context.Context, containerID string) error
+	Resume(ctx context.Context, containerID string) error
+	Stats(ctx context.Context, containerID string) (*Stats, error)
+	ListPids(ctx context.Context, containerID string) ([]uint32, error)
+	Summary(ctx context.Context, containerID string) ([]Summary, error)
+	DeleteTask(ctx context.Context, containerID string) (uint32, time.Time, error)
+	Delete(ctx context.Context, containerID string) error
+	Status(ctx context.Context, containerID string) (Status, error)
+
+	UpdateResources(ctx context.Context, containerID string, resources *Resources) error
+	CreateCheckpoint(ctx context.Context, containerID, checkpointDir string, exit bool) error
 }
 
 // StdioCallback is called to connect a container or process stdio.
-type StdioCallback func(IOPipe) error
+type StdioCallback func(*IOPipe) (containerd.IO, error)
 
 // IOPipe contains the stdio streams.
 type IOPipe struct {
@@ -66,10 +114,12 @@
 	Stdout   io.ReadCloser
 	Stderr   io.ReadCloser
 	Terminal bool // Whether stderr is connected on Windows
+
+	cancel context.CancelFunc
+	config containerd.IOConfig
 }
 
 // ServerVersion contains version information as retrieved from the
 // server
 type ServerVersion struct {
-	containerd.GetServerVersionResponse
 }
diff --git a/libcontainerd/types_linux.go b/libcontainerd/types_linux.go
index f21a85e..b63efcb 100644
--- a/libcontainerd/types_linux.go
+++ b/libcontainerd/types_linux.go
@@ -1,49 +1,30 @@
 package libcontainerd
 
 import (
-	containerd "github.com/containerd/containerd/api/grpc/types"
-	"github.com/opencontainers/runtime-spec/specs-go"
+	"time"
+
+	"github.com/containerd/cgroups"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
 )
 
-// Process contains information to start a specific application inside the container.
-type Process struct {
-	// Terminal creates an interactive terminal for the container.
-	Terminal bool `json:"terminal"`
-	// User specifies user information for the process.
-	User *specs.User `json:"user"`
-	// Args specifies the binary and arguments for the application to execute.
-	Args []string `json:"args"`
-	// Env populates the process environment for the process.
-	Env []string `json:"env,omitempty"`
-	// Cwd is the current working directory for the process and must be
-	// relative to the container's root.
-	Cwd *string `json:"cwd"`
-	// Capabilities are linux capabilities that are kept for the container.
-	Capabilities []string `json:"capabilities,omitempty"`
-	// Rlimits specifies rlimit options to apply to the process.
-	Rlimits []specs.POSIXRlimit `json:"rlimits,omitempty"`
-	// ApparmorProfile specifies the apparmor profile for the container.
-	ApparmorProfile *string `json:"apparmorProfile,omitempty"`
-	// SelinuxLabel specifies the selinux context that the container process is run as.
-	SelinuxLabel *string `json:"selinuxLabel,omitempty"`
-}
-
-// StateInfo contains description about the new state container has entered.
-type StateInfo struct {
-	CommonStateInfo
-
-	// Platform specific StateInfo
-	OOMKilled bool
-}
-
-// Stats contains a stats properties from containerd.
-type Stats containerd.StatsResponse
-
-// Summary contains a container summary from containerd
+// Summary is not used on linux
 type Summary struct{}
 
-// Resources defines updatable container resource values.
-type Resources containerd.UpdateResource
+// Stats holds metrics properties as returned by containerd
+type Stats struct {
+	Read    time.Time
+	Metrics *cgroups.Metrics
+}
+
+func interfaceToStats(read time.Time, v interface{}) *Stats {
+	return &Stats{
+		Metrics: v.(*cgroups.Metrics),
+		Read:    read,
+	}
+}
+
+// Resources defines updatable container resource values. TODO: it must match containerd upcoming API
+type Resources specs.LinuxResources
 
 // Checkpoints contains the details of a checkpoint
-type Checkpoints containerd.ListCheckpointResponse
+type Checkpoints struct{}
diff --git a/libcontainerd/types_solaris.go b/libcontainerd/types_solaris.go
deleted file mode 100644
index 2ab18eb..0000000
--- a/libcontainerd/types_solaris.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package libcontainerd
-
-import (
-	containerd "github.com/containerd/containerd/api/grpc/types"
-	"github.com/opencontainers/runtime-spec/specs-go"
-)
-
-// Process contains information to start a specific application inside the container.
-type Process struct {
-	// Terminal creates an interactive terminal for the container.
-	Terminal bool `json:"terminal"`
-	// User specifies user information for the process.
-	User *specs.User `json:"user"`
-	// Args specifies the binary and arguments for the application to execute.
-	Args []string `json:"args"`
-	// Env populates the process environment for the process.
-	Env []string `json:"env,omitempty"`
-	// Cwd is the current working directory for the process and must be
-	// relative to the container's root.
-	Cwd *string `json:"cwd"`
-	// Capabilities are linux capabilities that are kept for the container.
-	Capabilities []string `json:"capabilities,omitempty"`
-}
-
-// Stats contains a stats properties from containerd.
-type Stats struct{}
-
-// Summary contains a container summary from containerd
-type Summary struct{}
-
-// StateInfo contains description about the new state container has entered.
-type StateInfo struct {
-	CommonStateInfo
-
-	// Platform specific StateInfo
-	OOMKilled bool
-}
-
-// Resources defines updatable container resource values.
-type Resources struct{}
-
-// Checkpoints contains the details of a checkpoint
-type Checkpoints containerd.ListCheckpointResponse
diff --git a/libcontainerd/types_windows.go b/libcontainerd/types_windows.go
index f271ecd..aab8079 100644
--- a/libcontainerd/types_windows.go
+++ b/libcontainerd/types_windows.go
@@ -1,27 +1,27 @@
 package libcontainerd
 
 import (
+	"time"
+
 	"github.com/Microsoft/hcsshim"
 	opengcs "github.com/Microsoft/opengcs/client"
-	"github.com/opencontainers/runtime-spec/specs-go"
 )
 
-// Process contains information to start a specific application inside the container.
-type Process specs.Process
-
 // Summary contains a ProcessList item from HCS to support `top`
 type Summary hcsshim.ProcessListItem
 
-// StateInfo contains description about the new state container has entered.
-type StateInfo struct {
-	CommonStateInfo
-
-	// Platform specific StateInfo
-	UpdatePending bool // Indicates that there are some update operations pending that should be completed by a servicing container.
+// Stats contains statistics from HCS
+type Stats struct {
+	Read     time.Time
+	HCSStats *hcsshim.Statistics
 }
 
-// Stats contains statistics from HCS
-type Stats hcsshim.Statistics
+func interfaceToStats(read time.Time, v interface{}) *Stats {
+	return &Stats{
+		HCSStats: v.(*hcsshim.Statistics),
+		Read:     read,
+	}
+}
 
 // Resources defines updatable container resource values.
 type Resources struct{}
diff --git a/libcontainerd/utils_linux.go b/libcontainerd/utils_linux.go
index 5372b88..0f0adf3 100644
--- a/libcontainerd/utils_linux.go
+++ b/libcontainerd/utils_linux.go
@@ -1,63 +1,12 @@
 package libcontainerd
 
-import (
-	"syscall"
+import "syscall"
 
-	containerd "github.com/containerd/containerd/api/grpc/types"
-	"github.com/opencontainers/runtime-spec/specs-go"
-	"golang.org/x/sys/unix"
-)
-
-func getRootIDs(s specs.Spec) (int, int, error) {
-	var hasUserns bool
-	for _, ns := range s.Linux.Namespaces {
-		if ns.Type == specs.UserNamespace {
-			hasUserns = true
-			break
-		}
-	}
-	if !hasUserns {
-		return 0, 0, nil
-	}
-	uid := hostIDFromMap(0, s.Linux.UIDMappings)
-	gid := hostIDFromMap(0, s.Linux.GIDMappings)
-	return uid, gid, nil
-}
-
-func hostIDFromMap(id uint32, mp []specs.LinuxIDMapping) int {
-	for _, m := range mp {
-		if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 {
-			return int(m.HostID + id - m.ContainerID)
-		}
-	}
-	return 0
-}
-
-func systemPid(ctr *containerd.Container) uint32 {
-	var pid uint32
-	for _, p := range ctr.Processes {
-		if p.Pid == InitFriendlyName {
-			pid = p.SystemPid
-		}
-	}
-	return pid
-}
-
-func convertRlimits(sr []specs.POSIXRlimit) (cr []*containerd.Rlimit) {
-	for _, r := range sr {
-		cr = append(cr, &containerd.Rlimit{
-			Type: r.Type,
-			Hard: r.Hard,
-			Soft: r.Soft,
-		})
-	}
-	return
-}
-
-// setPDeathSig sets the parent death signal to SIGKILL
-func setSysProcAttr(sid bool) *syscall.SysProcAttr {
+// containerdSysProcAttr returns the SysProcAttr to use when exec'ing
+// containerd
+func containerdSysProcAttr() *syscall.SysProcAttr {
 	return &syscall.SysProcAttr{
-		Setsid:    sid,
-		Pdeathsig: unix.SIGKILL,
+		Setsid:    true,
+		Pdeathsig: syscall.SIGKILL,
 	}
 }
diff --git a/libcontainerd/utils_solaris.go b/libcontainerd/utils_solaris.go
deleted file mode 100644
index 10ae599..0000000
--- a/libcontainerd/utils_solaris.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package libcontainerd
-
-import (
-	"syscall"
-
-	containerd "github.com/containerd/containerd/api/grpc/types"
-	"github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func getRootIDs(s specs.Spec) (int, int, error) {
-	return 0, 0, nil
-}
-
-func systemPid(ctr *containerd.Container) uint32 {
-	var pid uint32
-	for _, p := range ctr.Processes {
-		if p.Pid == InitFriendlyName {
-			pid = p.SystemPid
-		}
-	}
-	return pid
-}
-
-// setPDeathSig sets the parent death signal to SIGKILL
-func setSysProcAttr(sid bool) *syscall.SysProcAttr {
-	return nil
-}
diff --git a/libcontainerd/utils_windows.go b/libcontainerd/utils_windows.go
index aa2fe42..1347ff2 100644
--- a/libcontainerd/utils_windows.go
+++ b/libcontainerd/utils_windows.go
@@ -1,6 +1,12 @@
 package libcontainerd
 
-import "strings"
+import (
+	"strings"
+
+	"syscall"
+
+	opengcs "github.com/Microsoft/opengcs/client"
+)
 
 // setupEnvironmentVariables converts a string array of environment variables
 // into a map as required by the HCS. Source array is in format [v1=k1] [v2=k2] etc.
@@ -19,3 +25,22 @@
 func (s *LCOWOption) Apply(interface{}) error {
 	return nil
 }
+
+// debugGCS is a dirty hack for debugging for Linux Utility VMs. It simply
+// runs a bunch of commands inside the UVM, but seriously aids in advanced debugging.
+func (c *container) debugGCS() {
+	if c == nil || c.isWindows || c.hcsContainer == nil {
+		return
+	}
+	cfg := opengcs.Config{
+		Uvm:               c.hcsContainer,
+		UvmTimeoutSeconds: 600,
+	}
+	cfg.DebugGCS()
+}
+
+// containerdSysProcAttr returns the SysProcAttr to use when exec'ing
+// containerd
+func containerdSysProcAttr() *syscall.SysProcAttr {
+	return nil
+}
diff --git a/migrate/v1/migratev1_test.go b/migrate/v1/migratev1_test.go
index 51b6741..122a080 100644
--- a/migrate/v1/migratev1_test.go
+++ b/migrate/v1/migratev1_test.go
@@ -433,7 +433,7 @@
 	return 0, nil
 }
 
-func (l *mockLayer) Platform() layer.Platform {
+func (l *mockLayer) OS() layer.OS {
 	return ""
 }
 
diff --git a/oci/defaults.go b/oci/defaults.go
index d706faf..4188071 100644
--- a/oci/defaults.go
+++ b/oci/defaults.go
@@ -39,11 +39,8 @@
 func DefaultOSSpec(osName string) specs.Spec {
 	if osName == "windows" {
 		return DefaultWindowsSpec()
-	} else if osName == "solaris" {
-		return DefaultSolarisSpec()
-	} else {
-		return DefaultLinuxSpec()
 	}
+	return DefaultLinuxSpec()
 }
 
 // DefaultWindowsSpec create a default spec for running Windows containers
@@ -56,21 +53,19 @@
 	}
 }
 
-// DefaultSolarisSpec create a default spec for running Solaris containers
-func DefaultSolarisSpec() specs.Spec {
-	s := specs.Spec{
-		Version: "0.6.0",
-	}
-	s.Solaris = &specs.Solaris{}
-	return s
-}
-
 // DefaultLinuxSpec create a default spec for running Linux containers
 func DefaultLinuxSpec() specs.Spec {
 	s := specs.Spec{
 		Version: specs.Version,
-		Process: &specs.Process{},
-		Root:    &specs.Root{},
+		Process: &specs.Process{
+			Capabilities: &specs.LinuxCapabilities{
+				Bounding:    defaultCapabilities(),
+				Permitted:   defaultCapabilities(),
+				Inheritable: defaultCapabilities(),
+				Effective:   defaultCapabilities(),
+			},
+		},
+		Root: &specs.Root{},
 	}
 	s.Mounts = []specs.Mount{
 		{
@@ -116,14 +111,6 @@
 			Options:     []string{"nosuid", "noexec", "nodev", "mode=1777"},
 		},
 	}
-	s.Process = &specs.Process{
-		Capabilities: &specs.LinuxCapabilities{
-			Bounding:    defaultCapabilities(),
-			Permitted:   defaultCapabilities(),
-			Inheritable: defaultCapabilities(),
-			Effective:   defaultCapabilities(),
-		},
-	}
 
 	s.Linux = &specs.Linux{
 		MaskedPaths: []string{
@@ -132,6 +119,7 @@
 			"/proc/timer_list",
 			"/proc/timer_stats",
 			"/proc/sched_debug",
+			"/proc/scsi",
 		},
 		ReadonlyPaths: []string{
 			"/proc/asound",
diff --git a/opts/hosts.go b/opts/hosts.go
index 594cccf..f46b8ee 100644
--- a/opts/hosts.go
+++ b/opts/hosts.go
@@ -29,9 +29,9 @@
 // ValidateHost validates that the specified string is a valid host and returns it.
 func ValidateHost(val string) (string, error) {
 	host := strings.TrimSpace(val)
-	// The empty string means default and is not handled by parseDockerDaemonHost
+	// The empty string means default and is not handled by parseDaemonHost
 	if host != "" {
-		_, err := parseDockerDaemonHost(host)
+		_, err := parseDaemonHost(host)
 		if err != nil {
 			return val, err
 		}
@@ -52,7 +52,7 @@
 		}
 	} else {
 		var err error
-		host, err = parseDockerDaemonHost(host)
+		host, err = parseDaemonHost(host)
 		if err != nil {
 			return val, err
 		}
@@ -60,9 +60,9 @@
 	return host, nil
 }
 
-// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
+// parseDaemonHost parses the specified address and returns an address that will be used as the host.
 // Depending of the address specified, this may return one of the global Default* strings defined in hosts.go.
-func parseDockerDaemonHost(addr string) (string, error) {
+func parseDaemonHost(addr string) (string, error) {
 	addrParts := strings.SplitN(addr, "://", 2)
 	if len(addrParts) == 1 && addrParts[0] != "" {
 		addrParts = []string{"tcp", addrParts[0]}
diff --git a/opts/hosts_test.go b/opts/hosts_test.go
index 8aada6a..f3ae848 100644
--- a/opts/hosts_test.go
+++ b/opts/hosts_test.go
@@ -83,12 +83,12 @@
 		"localhost:5555/path":     "tcp://localhost:5555/path",
 	}
 	for invalidAddr, expectedError := range invalids {
-		if addr, err := parseDockerDaemonHost(invalidAddr); err == nil || err.Error() != expectedError {
+		if addr, err := parseDaemonHost(invalidAddr); err == nil || err.Error() != expectedError {
 			t.Errorf("tcp %v address expected error %q return, got %q and addr %v", invalidAddr, expectedError, err, addr)
 		}
 	}
 	for validAddr, expectedAddr := range valids {
-		if addr, err := parseDockerDaemonHost(validAddr); err != nil || addr != expectedAddr {
+		if addr, err := parseDaemonHost(validAddr); err != nil || addr != expectedAddr {
 			t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr)
 		}
 	}
diff --git a/opts/opts.go b/opts/opts.go
index a86d74d..a2cc5e3 100644
--- a/opts/opts.go
+++ b/opts/opts.go
@@ -263,6 +263,16 @@
 	return val, nil
 }
 
+// ValidateSingleGenericResource validates that a single entry in the
+// generic resource list is valid.
+// i.e. 'GPU=UID1' is valid; however, 'GPU:UID1' or 'UID1' isn't
+func ValidateSingleGenericResource(val string) (string, error) {
+	if strings.Count(val, "=") < 1 {
+		return "", fmt.Errorf("invalid node-generic-resource format `%s` expected `name=value`", val)
+	}
+	return val, nil
+}
+
 // ParseLink parses and validates the specified string as a link format (name:alias)
 func ParseLink(val string) (string, string, error) {
 	if val == "" {
diff --git a/pkg/README.md b/pkg/README.md
index c4b78a8..755cd96 100644
--- a/pkg/README.md
+++ b/pkg/README.md
@@ -1,8 +1,8 @@
-pkg/ is a collection of utility packages used by the Docker project without being specific to its internals.
+pkg/ is a collection of utility packages used by the Moby project without being specific to its internals.
 
-Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible.
+Utility packages are kept separate from the moby core codebase to keep it as small and concise as possible.
 If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the
-Docker organization, to facilitate re-use by other projects. However that is not the priority.
+Moby organization, to facilitate re-use by other projects. However that is not the priority.
 
 The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core
 Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad!
diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go
index 677c1e4..aa55637 100644
--- a/pkg/archive/archive.go
+++ b/pkg/archive/archive.go
@@ -20,7 +20,6 @@
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/pools"
-	"github.com/docker/docker/pkg/promise"
 	"github.com/docker/docker/pkg/system"
 	"github.com/sirupsen/logrus"
 )
@@ -55,18 +54,17 @@
 	}
 )
 
-// Archiver allows the reuse of most utility functions of this package
-// with a pluggable Untar function. Also, to facilitate the passing of
-// specific id mappings for untar, an archiver can be created with maps
-// which will then be passed to Untar operations
+// Archiver implements the Archiver interface and allows the reuse of most utility functions of
+// this package with a pluggable Untar function. Also, to facilitate the passing of specific id
+// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations.
 type Archiver struct {
-	Untar      func(io.Reader, string, *TarOptions) error
-	IDMappings *idtools.IDMappings
+	Untar         func(io.Reader, string, *TarOptions) error
+	IDMappingsVar *idtools.IDMappings
 }
 
 // NewDefaultArchiver returns a new Archiver without any IDMappings
 func NewDefaultArchiver() *Archiver {
-	return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}}
+	return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}}
 }
 
 // breakoutError is used to differentiate errors related to breaking out
@@ -1025,8 +1023,8 @@
 	}
 	defer archive.Close()
 	options := &TarOptions{
-		UIDMaps: archiver.IDMappings.UIDs(),
-		GIDMaps: archiver.IDMappings.GIDs(),
+		UIDMaps: archiver.IDMappingsVar.UIDs(),
+		GIDMaps: archiver.IDMappingsVar.GIDs(),
 	}
 	return archiver.Untar(archive, dst, options)
 }
@@ -1039,8 +1037,8 @@
 	}
 	defer archive.Close()
 	options := &TarOptions{
-		UIDMaps: archiver.IDMappings.UIDs(),
-		GIDMaps: archiver.IDMappings.GIDs(),
+		UIDMaps: archiver.IDMappingsVar.UIDs(),
+		GIDMaps: archiver.IDMappingsVar.GIDs(),
 	}
 	return archiver.Untar(archive, dst, options)
 }
@@ -1058,10 +1056,10 @@
 		return archiver.CopyFileWithTar(src, dst)
 	}
 
-	// if this archiver is set up with ID mapping we need to create
+	// if this Archiver is set up with ID mapping we need to create
 	// the new destination directory with the remapped root UID/GID pair
 	// as owner
-	rootIDs := archiver.IDMappings.RootPair()
+	rootIDs := archiver.IDMappingsVar.RootPair()
 	// Create dst, copy src's content into it
 	logrus.Debugf("Creating dest directory: %s", dst)
 	if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
@@ -1096,36 +1094,42 @@
 	}
 
 	r, w := io.Pipe()
-	errC := promise.Go(func() error {
-		defer w.Close()
+	errC := make(chan error, 1)
 
-		srcF, err := os.Open(src)
-		if err != nil {
-			return err
-		}
-		defer srcF.Close()
+	go func() {
+		defer close(errC)
 
-		hdr, err := tar.FileInfoHeader(srcSt, "")
-		if err != nil {
-			return err
-		}
-		hdr.Name = filepath.Base(dst)
-		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+		errC <- func() error {
+			defer w.Close()
 
-		if err := remapIDs(archiver.IDMappings, hdr); err != nil {
-			return err
-		}
+			srcF, err := os.Open(src)
+			if err != nil {
+				return err
+			}
+			defer srcF.Close()
 
-		tw := tar.NewWriter(w)
-		defer tw.Close()
-		if err := tw.WriteHeader(hdr); err != nil {
-			return err
-		}
-		if _, err := io.Copy(tw, srcF); err != nil {
-			return err
-		}
-		return nil
-	})
+			hdr, err := tar.FileInfoHeader(srcSt, "")
+			if err != nil {
+				return err
+			}
+			hdr.Name = filepath.Base(dst)
+			hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+			if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil {
+				return err
+			}
+
+			tw := tar.NewWriter(w)
+			defer tw.Close()
+			if err := tw.WriteHeader(hdr); err != nil {
+				return err
+			}
+			if _, err := io.Copy(tw, srcF); err != nil {
+				return err
+			}
+			return nil
+		}()
+	}()
 	defer func() {
 		if er := <-errC; err == nil && er != nil {
 			err = er
@@ -1139,6 +1143,11 @@
 	return err
 }
 
+// IDMappings returns the IDMappings of the archiver.
+func (archiver *Archiver) IDMappings() *idtools.IDMappings {
+	return archiver.IDMappingsVar
+}
+
 func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
 	ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
 	hdr.Uid, hdr.Gid = ids.UID, ids.GID
diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go
index d6be350..989557c 100644
--- a/pkg/archive/archive_test.go
+++ b/pkg/archive/archive_test.go
@@ -72,12 +72,7 @@
 }
 
 func TestIsArchivePathTar(t *testing.T) {
-	var whichTar string
-	if runtime.GOOS == "solaris" {
-		whichTar = "gtar"
-	} else {
-		whichTar = "tar"
-	}
+	whichTar := "tar"
 	cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar)
 	cmd := exec.Command("sh", "-c", cmdStr)
 	output, err := cmd.CombinedOutput()
@@ -1183,8 +1178,10 @@
 func TestTempArchiveCloseMultipleTimes(t *testing.T) {
 	reader := ioutil.NopCloser(strings.NewReader("hello"))
 	tempArchive, err := NewTempArchive(reader, "")
+	require.NoError(t, err)
 	buf := make([]byte, 10)
 	n, err := tempArchive.Read(buf)
+	require.NoError(t, err)
 	if n != 5 {
 		t.Fatalf("Expected to read 5 bytes. Read %d instead", n)
 	}
diff --git a/pkg/archive/archive_unix.go b/pkg/archive/archive_unix.go
index 73814d5..02e95ad 100644
--- a/pkg/archive/archive_unix.go
+++ b/pkg/archive/archive_unix.go
@@ -50,8 +50,8 @@
 		// Currently go does not fill in the major/minors
 		if s.Mode&unix.S_IFBLK != 0 ||
 			s.Mode&unix.S_IFCHR != 0 {
-			hdr.Devmajor = int64(major(s.Rdev))
-			hdr.Devminor = int64(minor(s.Rdev))
+			hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
+			hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
 		}
 	}
 
@@ -77,14 +77,6 @@
 	return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
 }
 
-func major(device uint64) uint64 {
-	return (device >> 8) & 0xfff
-}
-
-func minor(device uint64) uint64 {
-	return (device & 0xff) | ((device >> 12) & 0xfff00)
-}
-
 // handleTarTypeBlockCharFifo is an OS-specific helper function used by
 // createTarFile to handle the following types of header: Block; Char; Fifo
 func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
diff --git a/pkg/archive/archive_unix_test.go b/pkg/archive/archive_unix_test.go
index 2a628f4..8ca21d2 100644
--- a/pkg/archive/archive_unix_test.go
+++ b/pkg/archive/archive_unix_test.go
@@ -8,7 +8,7 @@
 	"io/ioutil"
 	"os"
 	"path/filepath"
-	"runtime"
+	"strings"
 	"syscall"
 	"testing"
 
@@ -223,9 +223,6 @@
 
 // TestTarUntarWithXattr is Unix as Lsetxattr is not supported on Windows
 func TestTarUntarWithXattr(t *testing.T) {
-	if runtime.GOOS == "solaris" {
-		t.Skip()
-	}
 	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
 	require.NoError(t, err)
 	defer os.RemoveAll(origin)
@@ -261,3 +258,58 @@
 		}
 	}
 }
+
+func TestCopyInfoDestinationPathSymlink(t *testing.T) {
+	tmpDir, _ := getTestTempDirs(t)
+	defer removeAllPaths(tmpDir)
+
+	root := strings.TrimRight(tmpDir, "/") + "/"
+
+	type FileTestData struct {
+		resource FileData
+		file     string
+		expected CopyInfo
+	}
+
+	testData := []FileTestData{
+		//Create a directory: /tmp/archive-copy-test*/dir1
+		//Test will "copy" file1 to dir1
+		{resource: FileData{filetype: Dir, path: "dir1", permissions: 0740}, file: "file1", expected: CopyInfo{Path: root + "dir1/file1", Exists: false, IsDir: false}},
+
+		//Create a symlink directory to dir1: /tmp/archive-copy-test*/dirSymlink -> dir1
+		//Test will "copy" file2 to dirSymlink
+		{resource: FileData{filetype: Symlink, path: "dirSymlink", contents: root + "dir1", permissions: 0600}, file: "file2", expected: CopyInfo{Path: root + "dirSymlink/file2", Exists: false, IsDir: false}},
+
+		//Create a file in tmp directory: /tmp/archive-copy-test*/file1
+		//Test to cover when the full file path already exists.
+		{resource: FileData{filetype: Regular, path: "file1", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "file1", Exists: true}},
+
+		//Create a directory: /tmp/archive-copy*/dir2
+		//Test to cover when the full directory path already exists
+		{resource: FileData{filetype: Dir, path: "dir2", permissions: 0740}, file: "", expected: CopyInfo{Path: root + "dir2", Exists: true, IsDir: true}},
+
+		//Create a symlink to a non-existent target: /tmp/archive-copy*/symlink1 -> noSuchTarget
+		//Negative test to cover symlinking to a target that does not exist
+		{resource: FileData{filetype: Symlink, path: "symlink1", contents: "noSuchTarget", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "noSuchTarget", Exists: false}},
+
+		//Create a file in tmp directory for next test: /tmp/existingfile
+		{resource: FileData{filetype: Regular, path: "existingfile", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "existingfile", Exists: true}},
+
+		//Create a symlink to an existing file: /tmp/archive-copy*/symlink2 -> /tmp/existingfile
+		//Test to cover when the parent directory of a new file is a symlink
+		{resource: FileData{filetype: Symlink, path: "symlink2", contents: "existingfile", permissions: 0600}, file: "", expected: CopyInfo{Path: root + "existingfile", Exists: true}},
+	}
+
+	var dirs []FileData
+	for _, data := range testData {
+		dirs = append(dirs, data.resource)
+	}
+	provisionSampleDir(t, tmpDir, dirs)
+
+	for _, info := range testData {
+		p := filepath.Join(tmpDir, info.resource.path, info.file)
+		ci, err := CopyInfoDestinationPath(p)
+		assert.NoError(t, err)
+		assert.Equal(t, info.expected, ci)
+	}
+}
diff --git a/pkg/archive/changes_linux.go b/pkg/archive/changes_linux.go
index 8e96d96..e9eb478 100644
--- a/pkg/archive/changes_linux.go
+++ b/pkg/archive/changes_linux.go
@@ -294,7 +294,7 @@
 func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
 	if fi.Mode()&os.ModeCharDevice != 0 {
 		s := fi.Sys().(*syscall.Stat_t)
-		if major(s.Rdev) == 0 && minor(s.Rdev) == 0 {
+		if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert
 			return path, nil
 		}
 	}
diff --git a/pkg/archive/changes_posix_test.go b/pkg/archive/changes_posix_test.go
index 095102e..1a18370 100644
--- a/pkg/archive/changes_posix_test.go
+++ b/pkg/archive/changes_posix_test.go
@@ -7,16 +7,11 @@
 	"io/ioutil"
 	"os"
 	"path"
-	"runtime"
 	"sort"
 	"testing"
 )
 
 func TestHardLinkOrder(t *testing.T) {
-	//TODO Should run for Solaris
-	if runtime.GOOS == "solaris" {
-		t.Skip("gcp failures on Solaris")
-	}
 	names := []string{"file1.txt", "file2.txt", "file3.txt"}
 	msg := []byte("Hey y'all")
 
diff --git a/pkg/archive/changes_test.go b/pkg/archive/changes_test.go
index 8c14a86..3f2bd56 100644
--- a/pkg/archive/changes_test.go
+++ b/pkg/archive/changes_test.go
@@ -22,15 +22,7 @@
 }
 
 func copyDir(src, dst string) error {
-	cmd := exec.Command("cp", "-a", src, dst)
-	if runtime.GOOS == "solaris" {
-		cmd = exec.Command("gcp", "-a", src, dst)
-	}
-
-	if err := cmd.Run(); err != nil {
-		return err
-	}
-	return nil
+	return exec.Command("cp", "-a", src, dst).Run()
 }
 
 type FileType uint32
@@ -50,32 +42,35 @@
 
 func createSampleDir(t *testing.T, root string) {
 	files := []FileData{
-		{Regular, "file1", "file1\n", 0600},
-		{Regular, "file2", "file2\n", 0666},
-		{Regular, "file3", "file3\n", 0404},
-		{Regular, "file4", "file4\n", 0600},
-		{Regular, "file5", "file5\n", 0600},
-		{Regular, "file6", "file6\n", 0600},
-		{Regular, "file7", "file7\n", 0600},
-		{Dir, "dir1", "", 0740},
-		{Regular, "dir1/file1-1", "file1-1\n", 01444},
-		{Regular, "dir1/file1-2", "file1-2\n", 0666},
-		{Dir, "dir2", "", 0700},
-		{Regular, "dir2/file2-1", "file2-1\n", 0666},
-		{Regular, "dir2/file2-2", "file2-2\n", 0666},
-		{Dir, "dir3", "", 0700},
-		{Regular, "dir3/file3-1", "file3-1\n", 0666},
-		{Regular, "dir3/file3-2", "file3-2\n", 0666},
-		{Dir, "dir4", "", 0700},
-		{Regular, "dir4/file3-1", "file4-1\n", 0666},
-		{Regular, "dir4/file3-2", "file4-2\n", 0666},
-		{Symlink, "symlink1", "target1", 0666},
-		{Symlink, "symlink2", "target2", 0666},
-		{Symlink, "symlink3", root + "/file1", 0666},
-		{Symlink, "symlink4", root + "/symlink3", 0666},
-		{Symlink, "dirSymlink", root + "/dir1", 0740},
+		{filetype: Regular, path: "file1", contents: "file1\n", permissions: 0600},
+		{filetype: Regular, path: "file2", contents: "file2\n", permissions: 0666},
+		{filetype: Regular, path: "file3", contents: "file3\n", permissions: 0404},
+		{filetype: Regular, path: "file4", contents: "file4\n", permissions: 0600},
+		{filetype: Regular, path: "file5", contents: "file5\n", permissions: 0600},
+		{filetype: Regular, path: "file6", contents: "file6\n", permissions: 0600},
+		{filetype: Regular, path: "file7", contents: "file7\n", permissions: 0600},
+		{filetype: Dir, path: "dir1", contents: "", permissions: 0740},
+		{filetype: Regular, path: "dir1/file1-1", contents: "file1-1\n", permissions: 01444},
+		{filetype: Regular, path: "dir1/file1-2", contents: "file1-2\n", permissions: 0666},
+		{filetype: Dir, path: "dir2", contents: "", permissions: 0700},
+		{filetype: Regular, path: "dir2/file2-1", contents: "file2-1\n", permissions: 0666},
+		{filetype: Regular, path: "dir2/file2-2", contents: "file2-2\n", permissions: 0666},
+		{filetype: Dir, path: "dir3", contents: "", permissions: 0700},
+		{filetype: Regular, path: "dir3/file3-1", contents: "file3-1\n", permissions: 0666},
+		{filetype: Regular, path: "dir3/file3-2", contents: "file3-2\n", permissions: 0666},
+		{filetype: Dir, path: "dir4", contents: "", permissions: 0700},
+		{filetype: Regular, path: "dir4/file3-1", contents: "file4-1\n", permissions: 0666},
+		{filetype: Regular, path: "dir4/file3-2", contents: "file4-2\n", permissions: 0666},
+		{filetype: Symlink, path: "symlink1", contents: "target1", permissions: 0666},
+		{filetype: Symlink, path: "symlink2", contents: "target2", permissions: 0666},
+		{filetype: Symlink, path: "symlink3", contents: root + "/file1", permissions: 0666},
+		{filetype: Symlink, path: "symlink4", contents: root + "/symlink3", permissions: 0666},
+		{filetype: Symlink, path: "dirSymlink", contents: root + "/dir1", permissions: 0740},
 	}
+	provisionSampleDir(t, root, files)
+}
 
+func provisionSampleDir(t *testing.T, root string, files []FileData) {
 	now := time.Now()
 	for _, info := range files {
 		p := path.Join(root, info.path)
@@ -188,6 +183,7 @@
 		t.Skip("symlinks on Windows")
 	}
 	baseLayer, err := ioutil.TempDir("", "docker-changes-test.")
+	require.NoError(t, err)
 	defer os.RemoveAll(baseLayer)
 
 	dir3 := path.Join(baseLayer, "dir1/dir2/dir3")
@@ -197,6 +193,7 @@
 	ioutil.WriteFile(file, []byte("hello"), 0666)
 
 	layer, err := ioutil.TempDir("", "docker-changes-test2.")
+	require.NoError(t, err)
 	defer os.RemoveAll(layer)
 
 	// Test creating a new file
@@ -219,6 +216,7 @@
 
 	// Now test changing a file
 	layer, err = ioutil.TempDir("", "docker-changes-test3.")
+	require.NoError(t, err)
 	defer os.RemoveAll(layer)
 
 	if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil {
@@ -241,9 +239,8 @@
 func TestChangesDirsEmpty(t *testing.T) {
 	// TODO Windows. There may be a way of running this, but turning off for now
 	// as createSampleDir uses symlinks.
-	// TODO Should work for Solaris
-	if runtime.GOOS == "windows" || runtime.GOOS == "solaris" {
-		t.Skip("symlinks on Windows; gcp failure on Solaris")
+	if runtime.GOOS == "windows" {
+		t.Skip("symlinks on Windows")
 	}
 	src, err := ioutil.TempDir("", "docker-changes-test")
 	require.NoError(t, err)
@@ -329,9 +326,8 @@
 func TestChangesDirsMutated(t *testing.T) {
 	// TODO Windows. There may be a way of running this, but turning off for now
 	// as createSampleDir uses symlinks.
-	// TODO Should work for Solaris
-	if runtime.GOOS == "windows" || runtime.GOOS == "solaris" {
-		t.Skip("symlinks on Windows; gcp failures on Solaris")
+	if runtime.GOOS == "windows" {
+		t.Skip("symlinks on Windows")
 	}
 	src, err := ioutil.TempDir("", "docker-changes-test")
 	require.NoError(t, err)
@@ -386,9 +382,8 @@
 func TestApplyLayer(t *testing.T) {
 	// TODO Windows. There may be a way of running this, but turning off for now
 	// as createSampleDir uses symlinks.
-	// TODO Should work for Solaris
-	if runtime.GOOS == "windows" || runtime.GOOS == "solaris" {
-		t.Skip("symlinks on Windows; gcp failures on Solaris")
+	if runtime.GOOS == "windows" {
+		t.Skip("symlinks on Windows")
 	}
 	src, err := ioutil.TempDir("", "docker-changes-test")
 	require.NoError(t, err)
@@ -465,6 +460,7 @@
 
 func TestChangesSize(t *testing.T) {
 	parentPath, err := ioutil.TempDir("", "docker-changes-test")
+	require.NoError(t, err)
 	defer os.RemoveAll(parentPath)
 	addition := path.Join(parentPath, "addition")
 	err = ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744)
diff --git a/pkg/archive/copy.go b/pkg/archive/copy.go
index 3adf8a2..d1e036d 100644
--- a/pkg/archive/copy.go
+++ b/pkg/archive/copy.go
@@ -27,23 +27,23 @@
 // path (from before being processed by utility functions from the path or
 // filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
 // path already ends in a `.` path segment, then another is not added. If the
-// clean path already ends in a path separator, then another is not added.
-func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
+// clean path already ends in the separator, then another is not added.
+func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
 	// Ensure paths are in platform semantics
-	cleanedPath = normalizePath(cleanedPath)
-	originalPath = normalizePath(originalPath)
+	cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
+	originalPath = strings.Replace(originalPath, "/", string(sep), -1)
 
 	if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
-		if !hasTrailingPathSeparator(cleanedPath) {
+		if !hasTrailingPathSeparator(cleanedPath, sep) {
 			// Add a separator if it doesn't already end with one (a cleaned
 			// path would only end in a separator if it is the root).
-			cleanedPath += string(filepath.Separator)
+			cleanedPath += string(sep)
 		}
 		cleanedPath += "."
 	}
 
-	if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
-		cleanedPath += string(filepath.Separator)
+	if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
+		cleanedPath += string(sep)
 	}
 
 	return cleanedPath
@@ -52,14 +52,14 @@
 // assertsDirectory returns whether the given path is
 // asserted to be a directory, i.e., the path ends with
 // a trailing '/' or `/.`, assuming a path separator of `/`.
-func assertsDirectory(path string) bool {
-	return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
+func assertsDirectory(path string, sep byte) bool {
+	return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
 }
 
 // hasTrailingPathSeparator returns whether the given
 // path ends with the system's path separator character.
-func hasTrailingPathSeparator(path string) bool {
-	return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
+func hasTrailingPathSeparator(path string, sep byte) bool {
+	return len(path) > 0 && path[len(path)-1] == sep
 }
 
 // specifiesCurrentDir returns whether the given path specifies
@@ -72,10 +72,10 @@
 // basename by first cleaning the path but preserves a trailing "." if the
 // original path specified the current directory.
 func SplitPathDirEntry(path string) (dir, base string) {
-	cleanedPath := filepath.Clean(normalizePath(path))
+	cleanedPath := filepath.Clean(filepath.FromSlash(path))
 
 	if specifiesCurrentDir(path) {
-		cleanedPath += string(filepath.Separator) + "."
+		cleanedPath += string(os.PathSeparator) + "."
 	}
 
 	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
@@ -106,19 +106,24 @@
 	// Separate the source path between its directory and
 	// the entry in that directory which we are archiving.
 	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
-
-	filter := []string{sourceBase}
+	opts := TarResourceRebaseOpts(sourceBase, rebaseName)
 
 	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+	return TarWithOptions(sourceDir, opts)
+}
 
-	return TarWithOptions(sourceDir, &TarOptions{
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
+// parameters to be sent to TarWithOptions (the TarOptions struct)
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+	filter := []string{sourceBase}
+	return &TarOptions{
 		Compression:      Uncompressed,
 		IncludeFiles:     filter,
 		IncludeSourceDir: true,
 		RebaseNames: map[string]string{
 			sourceBase: rebaseName,
 		},
-	})
+	}
 }
 
 // CopyInfo holds basic info about the source
@@ -218,7 +223,7 @@
 		// Ensure destination parent dir exists.
 		dstParent, _ := SplitPathDirEntry(path)
 
-		parentDirStat, err := os.Lstat(dstParent)
+		parentDirStat, err := os.Stat(dstParent)
 		if err != nil {
 			return CopyInfo{}, err
 		}
@@ -281,7 +286,7 @@
 			srcBase = srcInfo.RebaseName
 		}
 		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
-	case assertsDirectory(dstInfo.Path):
+	case assertsDirectory(dstInfo.Path, os.PathSeparator):
 		// The destination does not exist and is asserted to be created as a
 		// directory, but the source content is not a directory. This is an
 		// error condition since you cannot create a directory from a file
@@ -351,6 +356,9 @@
 	return rebased
 }
 
+// TODO @gupta-ak. These might have to be changed in the future to be
+// continuity driver aware as well to support LCOW.
+
 // CopyResource performs an archive copy from the given source path to the
 // given destination path. The source path MUST exist and the destination
 // path's parent directory must exist.
@@ -365,8 +373,8 @@
 	dstPath = normalizePath(dstPath)
 
 	// Clean the source and destination paths.
-	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
-	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
+	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
+	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
 
 	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
 		return err
@@ -429,7 +437,8 @@
 		// resolvedDirPath will have been cleaned (no trailing path separators) so
 		// we can manually join it with the base path element.
 		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
-		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
+		if hasTrailingPathSeparator(path, os.PathSeparator) &&
+			filepath.Base(path) != filepath.Base(resolvedPath) {
 			rebaseName = filepath.Base(path)
 		}
 	}
@@ -442,11 +451,13 @@
 	// linkTarget will have been cleaned (no trailing path separators and dot) so
 	// we can manually join it with them
 	var rebaseName string
-	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
+	if specifiesCurrentDir(path) &&
+		!specifiesCurrentDir(resolvedPath) {
 		resolvedPath += string(filepath.Separator) + "."
 	}
 
-	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
+	if hasTrailingPathSeparator(path, os.PathSeparator) &&
+		!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
 		resolvedPath += string(filepath.Separator)
 	}
 
diff --git a/pkg/authorization/authz.go b/pkg/authorization/authz.go
index 924908a..3bd2fc0 100644
--- a/pkg/authorization/authz.go
+++ b/pkg/authorization/authz.go
@@ -158,7 +158,7 @@
 
 // headers returns flatten version of the http headers excluding authorization
 func headers(header http.Header) map[string]string {
-	v := make(map[string]string, 0)
+	v := make(map[string]string)
 	for k, values := range header {
 		// Skip authorization headers
 		if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") {
diff --git a/pkg/authorization/plugin.go b/pkg/authorization/plugin.go
index 939f926..2797a72 100644
--- a/pkg/authorization/plugin.go
+++ b/pkg/authorization/plugin.go
@@ -48,9 +48,10 @@
 
 // authorizationPlugin is an internal adapter to docker plugin system
 type authorizationPlugin struct {
-	plugin *plugins.Client
-	name   string
-	once   sync.Once
+	initErr error
+	plugin  *plugins.Client
+	name    string
+	once    sync.Once
 }
 
 func newAuthorizationPlugin(name string) Plugin {
@@ -95,7 +96,6 @@
 // initPlugin initializes the authorization plugin if needed
 func (a *authorizationPlugin) initPlugin() error {
 	// Lazy loading of plugins
-	var err error
 	a.once.Do(func() {
 		if a.plugin == nil {
 			var plugin plugingetter.CompatPlugin
@@ -108,11 +108,11 @@
 				plugin, e = plugins.Get(a.name, AuthZApiImplements)
 			}
 			if e != nil {
-				err = e
+				a.initErr = e
 				return
 			}
 			a.plugin = plugin.Client()
 		}
 	})
-	return err
+	return a.initErr
 }
diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go
index 7604418..d6d0788 100644
--- a/pkg/chrootarchive/archive.go
+++ b/pkg/chrootarchive/archive.go
@@ -16,7 +16,10 @@
 	if idMappings == nil {
 		idMappings = &idtools.IDMappings{}
 	}
-	return &archive.Archiver{Untar: Untar, IDMappings: idMappings}
+	return &archive.Archiver{
+		Untar:         Untar,
+		IDMappingsVar: idMappings,
+	}
 }
 
 // Untar reads a stream of bytes from `archive`, parses it as a tar archive,
diff --git a/pkg/chrootarchive/archive_test.go b/pkg/chrootarchive/archive_test.go
index bd2deb2..1abfb40 100644
--- a/pkg/chrootarchive/archive_test.go
+++ b/pkg/chrootarchive/archive_test.go
@@ -197,8 +197,8 @@
 
 func TestChrootCopyWithTar(t *testing.T) {
 	// TODO Windows: Figure out why this is failing
-	if runtime.GOOS == "windows" || runtime.GOOS == "solaris" {
-		t.Skip("Failing on Windows and Solaris")
+	if runtime.GOOS == "windows" {
+		t.Skip("Failing on Windows")
 	}
 	tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar")
 	if err != nil {
diff --git a/pkg/chrootarchive/chroot_linux.go b/pkg/chrootarchive/chroot_linux.go
index ebc3b84..958e06c 100644
--- a/pkg/chrootarchive/chroot_linux.go
+++ b/pkg/chrootarchive/chroot_linux.go
@@ -26,8 +26,13 @@
 		return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
 	}
 
-	// make everything in new ns private
-	if err := mount.MakeRPrivate("/"); err != nil {
+	// Make everything in new ns slave.
+	// Don't use `private` here as this could race where the mountns gets a
+	//   reference to a mount and an unmount from the host does not propagate,
+	//   which could potentially cause transient errors for other operations,
+	//   even though this should be a relatively small window, here `slave` should
+	//   not cause any problems.
+	if err := mount.MakeRSlave("/"); err != nil {
 		return err
 	}
 
diff --git a/pkg/containerfs/archiver.go b/pkg/containerfs/archiver.go
new file mode 100644
index 0000000..3eeab49
--- /dev/null
+++ b/pkg/containerfs/archiver.go
@@ -0,0 +1,198 @@
+package containerfs
+
+import (
+	"archive/tar"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/idtools"
+	"github.com/docker/docker/pkg/system"
+	"github.com/sirupsen/logrus"
+)
+
+// TarFunc provides a function definition for a custom Tar function
+type TarFunc func(string, *archive.TarOptions) (io.ReadCloser, error)
+
+// UntarFunc provides a function definition for a custom Untar function
+type UntarFunc func(io.Reader, string, *archive.TarOptions) error
+
+// Archiver provides a similar implementation of the archive.Archiver package with the rootfs abstraction
+type Archiver struct {
+	SrcDriver     Driver
+	DstDriver     Driver
+	Tar           TarFunc
+	Untar         UntarFunc
+	IDMappingsVar *idtools.IDMappings
+}
+
+// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
+// If either Tar or Untar fails, TarUntar aborts and returns the error.
+func (archiver *Archiver) TarUntar(src, dst string) error {
+	logrus.Debugf("TarUntar(%s %s)", src, dst)
+	tarArchive, err := archiver.Tar(src, &archive.TarOptions{Compression: archive.Uncompressed})
+	if err != nil {
+		return err
+	}
+	defer tarArchive.Close()
+	options := &archive.TarOptions{
+		UIDMaps: archiver.IDMappingsVar.UIDs(),
+		GIDMaps: archiver.IDMappingsVar.GIDs(),
+	}
+	return archiver.Untar(tarArchive, dst, options)
+}
+
+// UntarPath untar a file from path to a destination, src is the source tar file path.
+func (archiver *Archiver) UntarPath(src, dst string) error {
+	tarArchive, err := archiver.SrcDriver.Open(src)
+	if err != nil {
+		return err
+	}
+	defer tarArchive.Close()
+	options := &archive.TarOptions{
+		UIDMaps: archiver.IDMappingsVar.UIDs(),
+		GIDMaps: archiver.IDMappingsVar.GIDs(),
+	}
+	return archiver.Untar(tarArchive, dst, options)
+}
+
+// CopyWithTar creates a tar archive of filesystem path `src`, and
+// unpacks it at filesystem path `dst`.
+// The archive is streamed directly with fixed buffering and no
+// intermediary disk IO.
+func (archiver *Archiver) CopyWithTar(src, dst string) error {
+	srcSt, err := archiver.SrcDriver.Stat(src)
+	if err != nil {
+		return err
+	}
+	if !srcSt.IsDir() {
+		return archiver.CopyFileWithTar(src, dst)
+	}
+
+	// if this archiver is set up with ID mapping we need to create
+	// the new destination directory with the remapped root UID/GID pair
+	// as owner
+	rootIDs := archiver.IDMappingsVar.RootPair()
+	// Create dst, copy src's content into it
+	if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
+		return err
+	}
+	logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
+	return archiver.TarUntar(src, dst)
+}
+
+// CopyFileWithTar emulates the behavior of the 'cp' command-line
+// for a single file. It copies a regular file from path `src` to
+// path `dst`, and preserves all its metadata.
+func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
+	logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst)
+	srcDriver := archiver.SrcDriver
+	dstDriver := archiver.DstDriver
+
+	srcSt, err := srcDriver.Stat(src)
+	if err != nil {
+		return err
+	}
+
+	if srcSt.IsDir() {
+		return fmt.Errorf("Can't copy a directory")
+	}
+
+	// Clean up the trailing slash. This must be done in an operating
+	// system specific manner.
+	if dst[len(dst)-1] == dstDriver.Separator() {
+		dst = dstDriver.Join(dst, srcDriver.Base(src))
+	}
+
+	// The original call was system.MkdirAll, which is just
+	// os.MkdirAll on not-Windows and changed for Windows.
+	if dstDriver.OS() == "windows" {
+		// Now we are WCOW
+		if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil {
+			return err
+		}
+	} else {
+		// We can just use the driver.MkdirAll function
+		if err := dstDriver.MkdirAll(dstDriver.Dir(dst), 0700); err != nil {
+			return err
+		}
+	}
+
+	r, w := io.Pipe()
+	errC := make(chan error, 1)
+
+	go func() {
+		defer close(errC)
+		errC <- func() error {
+			defer w.Close()
+
+			srcF, err := srcDriver.Open(src)
+			if err != nil {
+				return err
+			}
+			defer srcF.Close()
+
+			hdr, err := tar.FileInfoHeader(srcSt, "")
+			if err != nil {
+				return err
+			}
+			hdr.Name = dstDriver.Base(dst)
+			if dstDriver.OS() == "windows" {
+				hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+			} else {
+				hdr.Mode = int64(os.FileMode(hdr.Mode))
+			}
+
+			if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil {
+				return err
+			}
+
+			tw := tar.NewWriter(w)
+			defer tw.Close()
+			if err := tw.WriteHeader(hdr); err != nil {
+				return err
+			}
+			if _, err := io.Copy(tw, srcF); err != nil {
+				return err
+			}
+			return nil
+		}()
+	}()
+	defer func() {
+		if er := <-errC; err == nil && er != nil {
+			err = er
+		}
+	}()
+
+	err = archiver.Untar(r, dstDriver.Dir(dst), nil)
+	if err != nil {
+		r.CloseWithError(err)
+	}
+	return err
+}
+
+// IDMappings returns the IDMappings of the archiver.
+func (archiver *Archiver) IDMappings() *idtools.IDMappings {
+	return archiver.IDMappingsVar
+}
+
+func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
+	ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
+	hdr.Uid, hdr.Gid = ids.UID, ids.GID
+	return err
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+	//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
+	permPart := perm & os.ModePerm
+	noPermPart := perm &^ os.ModePerm
+	// Add the x bit: make everything +x from windows
+	permPart |= 0111
+	permPart &= 0755
+
+	return noPermPart | permPart
+}
diff --git a/pkg/containerfs/containerfs.go b/pkg/containerfs/containerfs.go
new file mode 100644
index 0000000..05842ac
--- /dev/null
+++ b/pkg/containerfs/containerfs.go
@@ -0,0 +1,87 @@
+package containerfs
+
+import (
+	"path/filepath"
+	"runtime"
+
+	"github.com/containerd/continuity/driver"
+	"github.com/containerd/continuity/pathdriver"
+	"github.com/docker/docker/pkg/symlink"
+)
+
+// ContainerFS represents a root file system
+type ContainerFS interface {
+	// Path returns the path to the root. Note that this may not exist
+	// on the local system, so the continuity operations must be used
+	Path() string
+
+	// ResolveScopedPath evaluates the given path scoped to the root.
+	// For example, if root=/a, and path=/b/c, then this function would return /a/b/c.
+	// If rawPath is true, then the function will not perform any modifications
+	// before path resolution. Otherwise, the function will clean the given path
+	// by making it an absolute path.
+	ResolveScopedPath(path string, rawPath bool) (string, error)
+
+	Driver
+}
+
+// Driver combines both continuity's Driver and PathDriver interfaces with a Platform
+// field to determine the OS.
+type Driver interface {
+	// OS returns the OS where the rootfs is located. Essentially,
+	// runtime.GOOS for everything aside from LCOW, which is "linux"
+	OS() string
+
+	// Architecture returns the hardware architecture where the
+	// container is located.
+	Architecture() string
+
+	// Driver & PathDriver provide methods to manipulate files & paths
+	driver.Driver
+	pathdriver.PathDriver
+}
+
+// NewLocalContainerFS is a helper function to implement daemon's Mount interface
+// when the graphdriver mount point is a local path on the machine.
+func NewLocalContainerFS(path string) ContainerFS {
+	return &local{
+		path:       path,
+		Driver:     driver.LocalDriver,
+		PathDriver: pathdriver.LocalPathDriver,
+	}
+}
+
+// NewLocalDriver provides file and path drivers for a local file system. They are
+// essentially a wrapper around the `os` and `filepath` functions.
+func NewLocalDriver() Driver {
+	return &local{
+		Driver:     driver.LocalDriver,
+		PathDriver: pathdriver.LocalPathDriver,
+	}
+}
+
+type local struct {
+	path string
+	driver.Driver
+	pathdriver.PathDriver
+}
+
+func (l *local) Path() string {
+	return l.path
+}
+
+func (l *local) ResolveScopedPath(path string, rawPath bool) (string, error) {
+	cleanedPath := path
+	if !rawPath {
+		cleanedPath = cleanScopedPath(path)
+	}
+	return symlink.FollowSymlinkInScope(filepath.Join(l.path, cleanedPath), l.path)
+}
+
+func (l *local) OS() string {
+	return runtime.GOOS
+}
+
+func (l *local) Architecture() string {
+	return runtime.GOARCH
+}
diff --git a/pkg/containerfs/containerfs_unix.go b/pkg/containerfs/containerfs_unix.go
new file mode 100644
index 0000000..fbc418f
--- /dev/null
+++ b/pkg/containerfs/containerfs_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package containerfs
+
+import "path/filepath"
+
+// cleanScopedPath prepends a platform separator to the path so it can be combined with a mount path.
+func cleanScopedPath(path string) string {
+	return filepath.Join(string(filepath.Separator), path)
+}
diff --git a/pkg/containerfs/containerfs_windows.go b/pkg/containerfs/containerfs_windows.go
new file mode 100644
index 0000000..56f5a75
--- /dev/null
+++ b/pkg/containerfs/containerfs_windows.go
@@ -0,0 +1,15 @@
+package containerfs
+
+import "path/filepath"
+
+// cleanScopedPath removes the C:\ syntax, and prepares to combine
+// with a volume path
+func cleanScopedPath(path string) string {
+	if len(path) >= 2 {
+		c := path[0]
+		if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
+			path = path[2:]
+		}
+	}
+	return filepath.Join(string(filepath.Separator), path)
+}
diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go
index 6a0ac24..4861876 100644
--- a/pkg/devicemapper/devmapper.go
+++ b/pkg/devicemapper/devmapper.go
@@ -351,8 +351,7 @@
 	// disable udev dm rules and delete the symlink under /dev/mapper by itself,
 	// even if the removal is deferred by the kernel.
 	cookie := new(uint)
-	var flags uint16
-	flags = DmUdevDisableLibraryFallback
+	flags := uint16(DmUdevDisableLibraryFallback)
 	if err := task.setCookie(cookie, flags); err != nil {
 		return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
 	}
@@ -465,8 +464,7 @@
 	}
 
 	cookie := new(uint)
-	var flags uint16
-	flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag
+	flags := uint16(DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag)
 	if err := task.setCookie(cookie, flags); err != nil {
 		return fmt.Errorf("devicemapper: Can't set cookie %s", err)
 	}
diff --git a/pkg/devicemapper/devmapper_log.go b/pkg/devicemapper/devmapper_log.go
index 65a202a..f2ac7da 100644
--- a/pkg/devicemapper/devmapper_log.go
+++ b/pkg/devicemapper/devmapper_log.go
@@ -12,7 +12,7 @@
 )
 
 // DevmapperLogger defines methods required to register as a callback for
-// logging events recieved from devicemapper. Note that devicemapper will send
+// logging events received from devicemapper. Note that devicemapper will send
 // *all* logs regardless to callbacks (including debug logs) so it's
 // recommended to not spam the console with the outputs.
 type DevmapperLogger interface {
diff --git a/pkg/devicemapper/devmapper_wrapper_static.go b/pkg/devicemapper/devmapper_wrapper_static.go
deleted file mode 100644
index cf7f26a..0000000
--- a/pkg/devicemapper/devmapper_wrapper_static.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build linux,cgo,static_build
-
-package devicemapper
-
-// #cgo pkg-config: --static devmapper
-import "C"
diff --git a/pkg/directory/directory_unix.go b/pkg/directory/directory_unix.go
index d4f2970..25da197 100644
--- a/pkg/directory/directory_unix.go
+++ b/pkg/directory/directory_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris
+// +build linux freebsd
 
 package directory
 
diff --git a/pkg/discovery/kv/kv_test.go b/pkg/discovery/kv/kv_test.go
index dab3939..adc5c28 100644
--- a/pkg/discovery/kv/kv_test.go
+++ b/pkg/discovery/kv/kv_test.go
@@ -11,7 +11,6 @@
 	"github.com/docker/docker/pkg/discovery"
 	"github.com/docker/libkv"
 	"github.com/docker/libkv/store"
-
 	"github.com/go-check/check"
 )
 
@@ -130,7 +129,6 @@
 
 // Close mock
 func (s *Mock) Close() {
-	return
 }
 
 func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) {
diff --git a/pkg/dmesg/dmesg_linux.go b/pkg/dmesg/dmesg_linux.go
new file mode 100644
index 0000000..7df7f3d
--- /dev/null
+++ b/pkg/dmesg/dmesg_linux.go
@@ -0,0 +1,20 @@
+// +build linux
+
+package dmesg
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// Dmesg returns last messages from the kernel log, up to size bytes
+func Dmesg(size int) []byte {
+	t := uintptr(3) // SYSLOG_ACTION_READ_ALL
+	b := make([]byte, size)
+	amt, _, err := unix.Syscall(unix.SYS_SYSLOG, t, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)))
+	if err != 0 {
+		return []byte{}
+	}
+	return b[:amt]
+}
diff --git a/pkg/dmesg/dmesg_linux_test.go b/pkg/dmesg/dmesg_linux_test.go
new file mode 100644
index 0000000..c5028aa
--- /dev/null
+++ b/pkg/dmesg/dmesg_linux_test.go
@@ -0,0 +1,9 @@
+package dmesg
+
+import (
+	"testing"
+)
+
+func TestDmesg(t *testing.T) {
+	t.Logf("dmesg output follows:\n%v", string(Dmesg(512)))
+}
diff --git a/pkg/fileutils/fileutils_solaris.go b/pkg/fileutils/fileutils_solaris.go
deleted file mode 100644
index 0f2cb7a..0000000
--- a/pkg/fileutils/fileutils_solaris.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package fileutils
-
-// GetTotalUsedFds Returns the number of used File Descriptors.
-// On Solaris these limits are per process and not systemwide
-func GetTotalUsedFds() int {
-	return -1
-}
diff --git a/pkg/idtools/idtools_unix.go b/pkg/idtools/idtools_unix.go
index 8701bb7..ff7968f 100644
--- a/pkg/idtools/idtools_unix.go
+++ b/pkg/idtools/idtools_unix.go
@@ -26,14 +26,19 @@
 	// so that we can chown all of them properly at the end.  If chownExisting is false, we won't
 	// chown the full directory path if it exists
 	var paths []string
-	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
-		paths = []string{path}
-	} else if err == nil && chownExisting {
+
+	stat, err := system.Stat(path)
+	if err == nil {
+		if !chownExisting {
+			return nil
+		}
+
 		// short-circuit--we were called with an existing directory and chown was requested
-		return os.Chown(path, ownerUID, ownerGID)
-	} else if err == nil {
-		// nothing to do; directory path fully exists already and chown was NOT requested
-		return nil
+		return lazyChown(path, ownerUID, ownerGID, stat)
+	}
+
+	if os.IsNotExist(err) {
+		paths = []string{path}
 	}
 
 	if mkAll {
@@ -60,7 +65,7 @@
 	// even if it existed, we will chown the requested path + any subpaths that
 	// didn't exist when we called MkdirAll
 	for _, pathComponent := range paths {
-		if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
+		if err := lazyChown(pathComponent, ownerUID, ownerGID, nil); err != nil {
 			return err
 		}
 	}
@@ -202,3 +207,20 @@
 	}
 	return bytes.NewReader(out), nil
 }
+
+// lazyChown performs a chown only if the uid/gid don't match what's requested
+// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
+// dir is on an NFS share, so don't call chown unless we absolutely must.
+func lazyChown(p string, uid, gid int, stat *system.StatT) error {
+	if stat == nil {
+		var err error
+		stat, err = system.Stat(p)
+		if err != nil {
+			return err
+		}
+	}
+	if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) {
+		return nil
+	}
+	return os.Chown(p, uid, gid)
+}
diff --git a/pkg/idtools/idtools_unix_test.go b/pkg/idtools/idtools_unix_test.go
index 2463342..6d3b591 100644
--- a/pkg/idtools/idtools_unix_test.go
+++ b/pkg/idtools/idtools_unix_test.go
@@ -6,19 +6,27 @@
 	"fmt"
 	"io/ioutil"
 	"os"
+	"os/user"
 	"path/filepath"
 	"testing"
 
+	"github.com/gotestyourself/gotestyourself/skip"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/sys/unix"
 )
 
+const (
+	tempUser = "tempuser"
+)
+
 type node struct {
 	uid int
 	gid int
 }
 
 func TestMkdirAllAs(t *testing.T) {
+	RequiresRoot(t)
 	dirName, err := ioutil.TempDir("", "mkdirall")
 	if err != nil {
 		t.Fatalf("Couldn't create temp dir: %v", err)
@@ -79,6 +87,7 @@
 }
 
 func TestMkdirAllAndChownNew(t *testing.T) {
+	RequiresRoot(t)
 	dirName, err := ioutil.TempDir("", "mkdirnew")
 	require.NoError(t, err)
 	defer os.RemoveAll(dirName)
@@ -119,7 +128,7 @@
 }
 
 func TestMkdirAs(t *testing.T) {
-
+	RequiresRoot(t)
 	dirName, err := ioutil.TempDir("", "mkdir")
 	if err != nil {
 		t.Fatalf("Couldn't create temp dir: %v", err)
@@ -224,6 +233,11 @@
 	return nil
 }
 
+func delUser(t *testing.T, name string) {
+	_, err := execCmd("userdel", name)
+	assert.NoError(t, err)
+}
+
 func TestParseSubidFileWithNewlinesAndComments(t *testing.T) {
 	tmpDir, err := ioutil.TempDir("", "parsesubid")
 	if err != nil {
@@ -251,3 +265,119 @@
 		t.Fatalf("wanted 65536, got %d instead", ranges[0].Length)
 	}
 }
+
+func TestGetRootUIDGID(t *testing.T) {
+	uidMap := []IDMap{
+		{
+			ContainerID: 0,
+			HostID:      os.Getuid(),
+			Size:        1,
+		},
+	}
+	gidMap := []IDMap{
+		{
+			ContainerID: 0,
+			HostID:      os.Getgid(),
+			Size:        1,
+		},
+	}
+
+	uid, gid, err := GetRootUIDGID(uidMap, gidMap)
+	assert.NoError(t, err)
+	assert.Equal(t, os.Getegid(), uid)
+	assert.Equal(t, os.Getegid(), gid)
+
+	uidMapError := []IDMap{
+		{
+			ContainerID: 1,
+			HostID:      os.Getuid(),
+			Size:        1,
+		},
+	}
+	_, _, err = GetRootUIDGID(uidMapError, gidMap)
+	assert.EqualError(t, err, "Container ID 0 cannot be mapped to a host ID")
+}
+
+func TestToContainer(t *testing.T) {
+	uidMap := []IDMap{
+		{
+			ContainerID: 2,
+			HostID:      2,
+			Size:        1,
+		},
+	}
+
+	containerID, err := toContainer(2, uidMap)
+	assert.NoError(t, err)
+	assert.Equal(t, uidMap[0].ContainerID, containerID)
+}
+
+func TestNewIDMappings(t *testing.T) {
+	RequiresRoot(t)
+	_, _, err := AddNamespaceRangesUser(tempUser)
+	assert.NoError(t, err)
+	defer delUser(t, tempUser)
+
+	tempUser, err := user.Lookup(tempUser)
+	assert.NoError(t, err)
+
+	gids, err := tempUser.GroupIds()
+	assert.NoError(t, err)
+	group, err := user.LookupGroupId(string(gids[0]))
+	assert.NoError(t, err)
+
+	idMappings, err := NewIDMappings(tempUser.Username, group.Name)
+	assert.NoError(t, err)
+
+	rootUID, rootGID, err := GetRootUIDGID(idMappings.UIDs(), idMappings.GIDs())
+	assert.NoError(t, err)
+
+	dirName, err := ioutil.TempDir("", "mkdirall")
+	assert.NoError(t, err, "Couldn't create temp directory")
+	defer os.RemoveAll(dirName)
+
+	err = MkdirAllAs(dirName, 0700, rootUID, rootGID)
+	assert.NoError(t, err, "Couldn't change ownership of file path. Got error")
+	assert.True(t, CanAccess(dirName, idMappings.RootPair()), fmt.Sprintf("Unable to access %s directory with user UID:%d and GID:%d", dirName, rootUID, rootGID))
+}
+
+func TestLookupUserAndGroup(t *testing.T) {
+	RequiresRoot(t)
+	uid, gid, err := AddNamespaceRangesUser(tempUser)
+	assert.NoError(t, err)
+	defer delUser(t, tempUser)
+
+	fetchedUser, err := LookupUser(tempUser)
+	assert.NoError(t, err)
+
+	fetchedUserByID, err := LookupUID(uid)
+	assert.NoError(t, err)
+	assert.Equal(t, fetchedUserByID, fetchedUser)
+
+	fetchedGroup, err := LookupGroup(tempUser)
+	assert.NoError(t, err)
+
+	fetchedGroupByID, err := LookupGID(gid)
+	assert.NoError(t, err)
+	assert.Equal(t, fetchedGroupByID, fetchedGroup)
+}
+
+func TestLookupUserAndGroupThatDoesNotExist(t *testing.T) {
+	fakeUser := "fakeuser"
+	_, err := LookupUser(fakeUser)
+	assert.EqualError(t, err, "getent unable to find entry \""+fakeUser+"\" in passwd database")
+
+	_, err = LookupUID(-1)
+	assert.Error(t, err)
+
+	fakeGroup := "fakegroup"
+	_, err = LookupGroup(fakeGroup)
+	assert.EqualError(t, err, "getent unable to find entry \""+fakeGroup+"\" in group database")
+
+	_, err = LookupGID(-1)
+	assert.Error(t, err)
+}
+
+func RequiresRoot(t *testing.T) {
+	skip.IfCondition(t, os.Getuid() != 0, "skipping test that requires root")
+}
diff --git a/pkg/jsonlog/jsonlog.go b/pkg/jsonlog/jsonlog.go
deleted file mode 100644
index 4734c31..0000000
--- a/pkg/jsonlog/jsonlog.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package jsonlog
-
-import (
-	"encoding/json"
-	"fmt"
-	"time"
-)
-
-// JSONLog represents a log message, typically a single entry from a given log stream.
-// JSONLogs can be easily serialized to and from JSON and support custom formatting.
-type JSONLog struct {
-	// Log is the log message
-	Log string `json:"log,omitempty"`
-	// Stream is the log source
-	Stream string `json:"stream,omitempty"`
-	// Created is the created timestamp of log
-	Created time.Time `json:"time"`
-	// Attrs is the list of extra attributes provided by the user
-	Attrs map[string]string `json:"attrs,omitempty"`
-}
-
-// Format returns the log formatted according to format
-// If format is nil, returns the log message
-// If format is json, returns the log marshaled in json format
-// By default, returns the log with the log time formatted according to format.
-func (jl *JSONLog) Format(format string) (string, error) {
-	if format == "" {
-		return jl.Log, nil
-	}
-	if format == "json" {
-		m, err := json.Marshal(jl)
-		return string(m), err
-	}
-	return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil
-}
-
-// Reset resets the log to nil.
-func (jl *JSONLog) Reset() {
-	jl.Log = ""
-	jl.Stream = ""
-	jl.Created = time.Time{}
-}
diff --git a/pkg/jsonlog/jsonlog_marshalling.go b/pkg/jsonlog/jsonlog_marshalling.go
deleted file mode 100644
index 83ce684..0000000
--- a/pkg/jsonlog/jsonlog_marshalling.go
+++ /dev/null
@@ -1,178 +0,0 @@
-// This code was initially generated by ffjson <https://github.com/pquerna/ffjson>
-// This code was generated via the following steps:
-// $ go get -u github.com/pquerna/ffjson
-// $ make BIND_DIR=. shell
-// $ ffjson pkg/jsonlog/jsonlog.go
-// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go
-//
-// It has been modified to improve the performance of time marshalling to JSON
-// and to clean it up.
-// Should this code need to be regenerated when the JSONLog struct is changed,
-// the relevant changes which have been made are:
-// import (
-//        "bytes"
-//-
-//        "unicode/utf8"
-// )
-//
-// func (mj *JSONLog) MarshalJSON() ([]byte, error) {
-//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) {
-//        }
-//        return buf.Bytes(), nil
-// }
-//+
-// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
-//-       var err error
-//-       var obj []byte
-//-       var first bool = true
-//-       _ = obj
-//-       _ = err
-//-       _ = first
-//+       var (
-//+               err       error
-//+               timestamp string
-//+               first     bool = true
-//+       )
-//        buf.WriteString(`{`)
-//        if len(mj.Log) != 0 {
-//                if first == true {
-//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
-//                buf.WriteString(`,`)
-//        }
-//        buf.WriteString(`"time":`)
-//-       obj, err = mj.Created.MarshalJSON()
-//+       timestamp, err = FastTimeMarshalJSON(mj.Created)
-//        if err != nil {
-//                return err
-//        }
-//-       buf.Write(obj)
-//+       buf.WriteString(timestamp)
-//        buf.WriteString(`}`)
-//        return nil
-// }
-// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
-//         if len(mj.Log) != 0 {
-// -                if first == true {
-// -                       first = false
-// -               } else {
-// -                       buf.WriteString(`,`)
-// -               }
-// +               first = false
-//                 buf.WriteString(`"log":`)
-//                 ffjsonWriteJSONString(buf, mj.Log)
-//         }
-
-package jsonlog
-
-import (
-	"bytes"
-	"unicode/utf8"
-)
-
-// MarshalJSON marshals the JSONLog.
-func (mj *JSONLog) MarshalJSON() ([]byte, error) {
-	var buf bytes.Buffer
-	buf.Grow(1024)
-	if err := mj.MarshalJSONBuf(&buf); err != nil {
-		return nil, err
-	}
-	return buf.Bytes(), nil
-}
-
-// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer.
-func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error {
-	var (
-		err       error
-		timestamp string
-		first     = true
-	)
-	buf.WriteString(`{`)
-	if len(mj.Log) != 0 {
-		first = false
-		buf.WriteString(`"log":`)
-		ffjsonWriteJSONString(buf, mj.Log)
-	}
-	if len(mj.Stream) != 0 {
-		if first {
-			first = false
-		} else {
-			buf.WriteString(`,`)
-		}
-		buf.WriteString(`"stream":`)
-		ffjsonWriteJSONString(buf, mj.Stream)
-	}
-	if !first {
-		buf.WriteString(`,`)
-	}
-	buf.WriteString(`"time":`)
-	timestamp, err = FastTimeMarshalJSON(mj.Created)
-	if err != nil {
-		return err
-	}
-	buf.WriteString(timestamp)
-	buf.WriteString(`}`)
-	return nil
-}
-
-func ffjsonWriteJSONString(buf *bytes.Buffer, s string) {
-	const hex = "0123456789abcdef"
-
-	buf.WriteByte('"')
-	start := 0
-	for i := 0; i < len(s); {
-		if b := s[i]; b < utf8.RuneSelf {
-			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
-				i++
-				continue
-			}
-			if start < i {
-				buf.WriteString(s[start:i])
-			}
-			switch b {
-			case '\\', '"':
-				buf.WriteByte('\\')
-				buf.WriteByte(b)
-			case '\n':
-				buf.WriteByte('\\')
-				buf.WriteByte('n')
-			case '\r':
-				buf.WriteByte('\\')
-				buf.WriteByte('r')
-			default:
-
-				buf.WriteString(`\u00`)
-				buf.WriteByte(hex[b>>4])
-				buf.WriteByte(hex[b&0xF])
-			}
-			i++
-			start = i
-			continue
-		}
-		c, size := utf8.DecodeRuneInString(s[i:])
-		if c == utf8.RuneError && size == 1 {
-			if start < i {
-				buf.WriteString(s[start:i])
-			}
-			buf.WriteString(`\ufffd`)
-			i += size
-			start = i
-			continue
-		}
-
-		if c == '\u2028' || c == '\u2029' {
-			if start < i {
-				buf.WriteString(s[start:i])
-			}
-			buf.WriteString(`\u202`)
-			buf.WriteByte(hex[c&0xF])
-			i += size
-			start = i
-			continue
-		}
-		i += size
-	}
-	if start < len(s) {
-		buf.WriteString(s[start:])
-	}
-	buf.WriteByte('"')
-}
diff --git a/pkg/jsonlog/jsonlog_marshalling_test.go b/pkg/jsonlog/jsonlog_marshalling_test.go
deleted file mode 100644
index 8b0d072..0000000
--- a/pkg/jsonlog/jsonlog_marshalling_test.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package jsonlog
-
-import (
-	"regexp"
-	"testing"
-)
-
-func TestJSONLogMarshalJSON(t *testing.T) {
-	logs := map[*JSONLog]string{
-		{Log: `"A log line with \\"`}:           `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`,
-		{Log: "A log line"}:                     `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`,
-		{Log: "A log line with \r"}:             `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`,
-		{Log: "A log line with & < >"}:          `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`,
-		{Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`,
-		{Stream: "stdout"}:                      `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`,
-		{}:                                      `^{\"time\":\".{20,}\"}$`,
-		// These ones are a little weird
-		{Log: "\u2028 \u2029"}:      `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`,
-		{Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`,
-		{Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`,
-	}
-	for jsonLog, expression := range logs {
-		data, err := jsonLog.MarshalJSON()
-		if err != nil {
-			t.Fatal(err)
-		}
-		res := string(data)
-		t.Logf("Result of WriteLog: %q", res)
-		logRe := regexp.MustCompile(expression)
-		if !logRe.MatchString(res) {
-			t.Fatalf("Log line not in expected format [%v]: %q", expression, res)
-		}
-	}
-}
diff --git a/pkg/jsonlog/jsonlogbytes.go b/pkg/jsonlog/jsonlogbytes.go
deleted file mode 100644
index 0ba716f..0000000
--- a/pkg/jsonlog/jsonlogbytes.go
+++ /dev/null
@@ -1,122 +0,0 @@
-package jsonlog
-
-import (
-	"bytes"
-	"encoding/json"
-	"unicode/utf8"
-)
-
-// JSONLogs is based on JSONLog.
-// It allows marshalling JSONLog from Log as []byte
-// and an already marshalled Created timestamp.
-type JSONLogs struct {
-	Log     []byte `json:"log,omitempty"`
-	Stream  string `json:"stream,omitempty"`
-	Created string `json:"time"`
-
-	// json-encoded bytes
-	RawAttrs json.RawMessage `json:"attrs,omitempty"`
-}
-
-// MarshalJSONBuf is based on the same method from JSONLog
-// It has been modified to take into account the necessary changes.
-func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error {
-	var first = true
-
-	buf.WriteString(`{`)
-	if len(mj.Log) != 0 {
-		first = false
-		buf.WriteString(`"log":`)
-		ffjsonWriteJSONBytesAsString(buf, mj.Log)
-	}
-	if len(mj.Stream) != 0 {
-		if first {
-			first = false
-		} else {
-			buf.WriteString(`,`)
-		}
-		buf.WriteString(`"stream":`)
-		ffjsonWriteJSONString(buf, mj.Stream)
-	}
-	if len(mj.RawAttrs) > 0 {
-		if first {
-			first = false
-		} else {
-			buf.WriteString(`,`)
-		}
-		buf.WriteString(`"attrs":`)
-		buf.Write(mj.RawAttrs)
-	}
-	if !first {
-		buf.WriteString(`,`)
-	}
-	buf.WriteString(`"time":`)
-	buf.WriteString(mj.Created)
-	buf.WriteString(`}`)
-	return nil
-}
-
-// This is based on ffjsonWriteJSONBytesAsString. It has been changed
-// to accept a string passed as a slice of bytes.
-func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) {
-	const hex = "0123456789abcdef"
-
-	buf.WriteByte('"')
-	start := 0
-	for i := 0; i < len(s); {
-		if b := s[i]; b < utf8.RuneSelf {
-			if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
-				i++
-				continue
-			}
-			if start < i {
-				buf.Write(s[start:i])
-			}
-			switch b {
-			case '\\', '"':
-				buf.WriteByte('\\')
-				buf.WriteByte(b)
-			case '\n':
-				buf.WriteByte('\\')
-				buf.WriteByte('n')
-			case '\r':
-				buf.WriteByte('\\')
-				buf.WriteByte('r')
-			default:
-
-				buf.WriteString(`\u00`)
-				buf.WriteByte(hex[b>>4])
-				buf.WriteByte(hex[b&0xF])
-			}
-			i++
-			start = i
-			continue
-		}
-		c, size := utf8.DecodeRune(s[i:])
-		if c == utf8.RuneError && size == 1 {
-			if start < i {
-				buf.Write(s[start:i])
-			}
-			buf.WriteString(`\ufffd`)
-			i += size
-			start = i
-			continue
-		}
-
-		if c == '\u2028' || c == '\u2029' {
-			if start < i {
-				buf.Write(s[start:i])
-			}
-			buf.WriteString(`\u202`)
-			buf.WriteByte(hex[c&0xF])
-			i += size
-			start = i
-			continue
-		}
-		i += size
-	}
-	if start < len(s) {
-		buf.Write(s[start:])
-	}
-	buf.WriteByte('"')
-}
diff --git a/pkg/jsonlog/jsonlogbytes_test.go b/pkg/jsonlog/jsonlogbytes_test.go
deleted file mode 100644
index 41049aa..0000000
--- a/pkg/jsonlog/jsonlogbytes_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package jsonlog
-
-import (
-	"bytes"
-	"regexp"
-	"testing"
-)
-
-func TestJSONLogsMarshalJSONBuf(t *testing.T) {
-	logs := map[*JSONLogs]string{
-		{Log: []byte(`"A log line with \\"`)}:           `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`,
-		{Log: []byte("A log line")}:                     `^{\"log\":\"A log line\",\"time\":}$`,
-		{Log: []byte("A log line with \r")}:             `^{\"log\":\"A log line with \\r\",\"time\":}$`,
-		{Log: []byte("A log line with & < >")}:          `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`,
-		{Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`,
-		{Stream: "stdout"}:                              `^{\"stream\":\"stdout\",\"time\":}$`,
-		{Stream: "stdout", Log: []byte("A log line")}:   `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`,
-		{Created: "time"}:                               `^{\"time\":time}$`,
-		{}:                                              `^{\"time\":}$`,
-		// These ones are a little weird
-		{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`,
-		{Log: []byte{0xaF}}:            `^{\"log\":\"\\ufffd\",\"time\":}$`,
-		{Log: []byte{0x7F}}:            `^{\"log\":\"\x7f\",\"time\":}$`,
-		// with raw attributes
-		{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`,
-	}
-	for jsonLog, expression := range logs {
-		var buf bytes.Buffer
-		if err := jsonLog.MarshalJSONBuf(&buf); err != nil {
-			t.Fatal(err)
-		}
-		res := buf.String()
-		t.Logf("Result of WriteLog: %q", res)
-		logRe := regexp.MustCompile(expression)
-		if !logRe.MatchString(res) {
-			t.Fatalf("Log line not in expected format [%v]: %q", expression, res)
-		}
-	}
-}
diff --git a/pkg/jsonlog/time_marshalling.go b/pkg/jsonlog/time_marshalling.go
deleted file mode 100644
index 2117338..0000000
--- a/pkg/jsonlog/time_marshalling.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON.
-package jsonlog
-
-import (
-	"errors"
-	"time"
-)
-
-const (
-	// RFC3339NanoFixed is our own version of RFC339Nano because we want one
-	// that pads the nano seconds part with zeros to ensure
-	// the timestamps are aligned in the logs.
-	RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
-	// JSONFormat is the format used by FastMarshalJSON
-	JSONFormat = `"` + time.RFC3339Nano + `"`
-)
-
-// FastTimeMarshalJSON avoids one of the extra allocations that
-// time.MarshalJSON is making.
-func FastTimeMarshalJSON(t time.Time) (string, error) {
-	if y := t.Year(); y < 0 || y >= 10000 {
-		// RFC 3339 is clear that years are 4 digits exactly.
-		// See golang.org/issue/4556#c15 for more discussion.
-		return "", errors.New("time.MarshalJSON: year outside of range [0,9999]")
-	}
-	return t.Format(JSONFormat), nil
-}
diff --git a/pkg/jsonlog/time_marshalling_test.go b/pkg/jsonlog/time_marshalling_test.go
deleted file mode 100644
index 02d0302..0000000
--- a/pkg/jsonlog/time_marshalling_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package jsonlog
-
-import (
-	"testing"
-	"time"
-)
-
-// Testing to ensure 'year' fields is between 0 and 9999
-func TestFastTimeMarshalJSONWithInvalidDate(t *testing.T) {
-	aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local)
-	json, err := FastTimeMarshalJSON(aTime)
-	if err == nil {
-		t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json)
-	}
-	anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local)
-	json, err = FastTimeMarshalJSON(anotherTime)
-	if err == nil {
-		t.Fatalf("FastTimeMarshalJSON should throw an error, but was '%v'", json)
-	}
-
-}
-
-func TestFastTimeMarshalJSON(t *testing.T) {
-	aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC)
-	json, err := FastTimeMarshalJSON(aTime)
-	if err != nil {
-		t.Fatal(err)
-	}
-	expected := "\"2015-05-29T11:01:02.000000003Z\""
-	if json != expected {
-		t.Fatalf("Expected %v, got %v", expected, json)
-	}
-
-	location, err := time.LoadLocation("Europe/Paris")
-	if err != nil {
-		t.Fatal(err)
-	}
-	aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location)
-	json, err = FastTimeMarshalJSON(aTime)
-	if err != nil {
-		t.Fatal(err)
-	}
-	expected = "\"2015-05-29T11:01:02.000000003+02:00\""
-	if json != expected {
-		t.Fatalf("Expected %v, got %v", expected, json)
-	}
-}
diff --git a/pkg/jsonmessage/jsonmessage.go b/pkg/jsonmessage/jsonmessage.go
index 09fc4cc..6cfa464 100644
--- a/pkg/jsonmessage/jsonmessage.go
+++ b/pkg/jsonmessage/jsonmessage.go
@@ -9,11 +9,14 @@
 	"time"
 
 	gotty "github.com/Nvveen/Gotty"
-	"github.com/docker/docker/pkg/jsonlog"
 	"github.com/docker/docker/pkg/term"
 	units "github.com/docker/go-units"
 )
 
+// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
+// ensure the formatted time is always the same number of characters.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
 // JSONError wraps a concrete Code and Message, `Code` is
 // is an integer error code, `Message` is the error message.
 type JSONError struct {
@@ -199,9 +202,9 @@
 		return nil
 	}
 	if jm.TimeNano != 0 {
-		fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed))
+		fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed))
 	} else if jm.Time != 0 {
-		fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed))
+		fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed))
 	}
 	if jm.ID != "" {
 		fmt.Fprintf(out, "%s: ", jm.ID)
diff --git a/pkg/jsonmessage/jsonmessage_test.go b/pkg/jsonmessage/jsonmessage_test.go
index 5206789..9740bcd 100644
--- a/pkg/jsonmessage/jsonmessage_test.go
+++ b/pkg/jsonmessage/jsonmessage_test.go
@@ -8,7 +8,6 @@
 	"testing"
 	"time"
 
-	"github.com/docker/docker/pkg/jsonlog"
 	"github.com/docker/docker/pkg/term"
 	"github.com/stretchr/testify/assert"
 )
@@ -115,8 +114,8 @@
 			From:   "From",
 			Status: "status",
 		}: {
-			fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)),
-			fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)),
+			fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(RFC3339NanoFixed)),
+			fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(RFC3339NanoFixed)),
 		},
 		// General, with nano precision time
 		{
@@ -125,8 +124,8 @@
 			From:     "From",
 			Status:   "status",
 		}: {
-			fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)),
-			fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)),
+			fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(RFC3339NanoFixed)),
+			fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(RFC3339NanoFixed)),
 		},
 		// General, with both times Nano is preferred
 		{
@@ -136,8 +135,8 @@
 			From:     "From",
 			Status:   "status",
 		}: {
-			fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)),
-			fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)),
+			fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(RFC3339NanoFixed)),
+			fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(RFC3339NanoFixed)),
 		},
 		// Stream over status
 		{
diff --git a/pkg/locker/README.md b/pkg/locker/README.md
index c8dbddc..ce787ae 100644
--- a/pkg/locker/README.md
+++ b/pkg/locker/README.md
@@ -35,7 +35,7 @@
 func (i *important) Get(name string) interface{} {
 	i.locks.Lock(name)
 	defer i.locks.Unlock(name)
-	return data[name]
+	return i.data[name]
 }
 
 func (i *important) Create(name string, data interface{}) {
@@ -44,9 +44,9 @@
 
 	i.createImportant(data)
 
-	s.mu.Lock()
+	i.mu.Lock()
 	i.data[name] = data
-	s.mu.Unlock()
+	i.mu.Unlock()
 }
 
 func (i *important) createImportant(data interface{}) {
diff --git a/pkg/locker/locker_test.go b/pkg/locker/locker_test.go
index 5a297dd..39633c9 100644
--- a/pkg/locker/locker_test.go
+++ b/pkg/locker/locker_test.go
@@ -1,6 +1,8 @@
 package locker
 
 import (
+	"math/rand"
+	"strconv"
 	"sync"
 	"testing"
 	"time"
@@ -122,3 +124,38 @@
 		t.Fatalf("lock should not exist: %v", ctr)
 	}
 }
+
+func BenchmarkLocker(b *testing.B) {
+	l := New()
+	for i := 0; i < b.N; i++ {
+		l.Lock("test")
+		l.Unlock("test")
+	}
+}
+
+func BenchmarkLockerParallel(b *testing.B) {
+	l := New()
+	b.SetParallelism(128)
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			l.Lock("test")
+			l.Unlock("test")
+		}
+	})
+}
+
+func BenchmarkLockerMoreKeys(b *testing.B) {
+	l := New()
+	var keys []string
+	for i := 0; i < 64; i++ {
+		keys = append(keys, strconv.Itoa(i))
+	}
+	b.SetParallelism(128)
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			k := keys[rand.Intn(len(keys))]
+			l.Lock(k)
+			l.Unlock(k)
+		}
+	})
+}
diff --git a/pkg/loopback/ioctl.go b/pkg/loopback/ioctl.go
index fa744f0..84fcea6 100644
--- a/pkg/loopback/ioctl.go
+++ b/pkg/loopback/ioctl.go
@@ -17,10 +17,7 @@
 }
 
 func ioctlLoopSetFd(loopFd, sparseFd uintptr) error {
-	if err := unix.IoctlSetInt(int(loopFd), LoopSetFd, int(sparseFd)); err != nil {
-		return err
-	}
-	return nil
+	return unix.IoctlSetInt(int(loopFd), LoopSetFd, int(sparseFd))
 }
 
 func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error {
@@ -47,8 +44,5 @@
 }
 
 func ioctlLoopSetCapacity(loopFd uintptr, value int) error {
-	if err := unix.IoctlSetInt(int(loopFd), LoopSetCapacity, value); err != nil {
-		return err
-	}
-	return nil
+	return unix.IoctlSetInt(int(loopFd), LoopSetCapacity, value)
 }
diff --git a/pkg/mount/flags_unsupported.go b/pkg/mount/flags_unsupported.go
index 9ed741e3..43d5e33 100644
--- a/pkg/mount/flags_unsupported.go
+++ b/pkg/mount/flags_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!freebsd freebsd,!cgo solaris,!cgo
+// +build !linux,!freebsd freebsd,!cgo
 
 package mount
 
diff --git a/pkg/mount/mount.go b/pkg/mount/mount.go
index c9fdfd6..ee5833c 100644
--- a/pkg/mount/mount.go
+++ b/pkg/mount/mount.go
@@ -3,6 +3,8 @@
 import (
 	"sort"
 	"strings"
+
+	"github.com/sirupsen/logrus"
 )
 
 // GetMounts retrieves a list of mounts for the current running process.
@@ -11,7 +13,7 @@
 }
 
 // Mounted determines if a specified mountpoint has been mounted.
-// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab.
+// On Linux it looks at /proc/self/mountinfo.
 func Mounted(mountpoint string) (bool, error) {
 	entries, err := parseMountTable()
 	if err != nil {
@@ -74,12 +76,18 @@
 		if !strings.HasPrefix(m.Mountpoint, target) {
 			continue
 		}
-		if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 {
+		logrus.Debugf("Trying to unmount %s", m.Mountpoint)
+		err = Unmount(m.Mountpoint)
+		if err != nil && i == len(mounts)-1 {
 			if mounted, err := Mounted(m.Mountpoint); err != nil || mounted {
 				return err
 			}
 			// Ignore errors for submounts and continue trying to unmount others
 			// The final unmount should fail if there ane any submounts remaining
+		} else if err != nil {
+			logrus.Errorf("Failed to unmount %s: %v", m.Mountpoint, err)
+		} else if err == nil {
+			logrus.Debugf("Unmounted %s", m.Mountpoint)
 		}
 	}
 	return nil
diff --git a/pkg/mount/mount_unix_test.go b/pkg/mount/mount_unix_test.go
index 253aff3..90fa348 100644
--- a/pkg/mount/mount_unix_test.go
+++ b/pkg/mount/mount_unix_test.go
@@ -1,4 +1,4 @@
-// +build !windows,!solaris
+// +build !windows
 
 package mount
 
diff --git a/pkg/mount/mounter_linux_test.go b/pkg/mount/mounter_linux_test.go
index 47c03b3..f02bd13 100644
--- a/pkg/mount/mounter_linux_test.go
+++ b/pkg/mount/mounter_linux_test.go
@@ -8,6 +8,8 @@
 	"os"
 	"strings"
 	"testing"
+
+	selinux "github.com/opencontainers/selinux/go-selinux"
 )
 
 func TestMount(t *testing.T) {
@@ -101,7 +103,11 @@
 				t.Fatal(err)
 			}
 			defer ensureUnmount(t, target)
-			validateMount(t, target, tc.expectedOpts, tc.expectedOptional, tc.expectedVFS)
+			expectedVFS := tc.expectedVFS
+			if selinux.GetEnabled() && expectedVFS != "" {
+				expectedVFS = expectedVFS + ",seclabel"
+			}
+			validateMount(t, target, tc.expectedOpts, tc.expectedOptional, expectedVFS)
 		})
 	}
 }
diff --git a/pkg/mount/mounter_solaris.go b/pkg/mount/mounter_solaris.go
deleted file mode 100644
index 48b8677..0000000
--- a/pkg/mount/mounter_solaris.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build solaris,cgo
-
-package mount
-
-import (
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// #include <stdlib.h>
-// #include <stdio.h>
-// #include <sys/mount.h>
-// int Mount(const char *spec, const char *dir, int mflag,
-// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) {
-//     return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen);
-// }
-import "C"
-
-func mount(device, target, mType string, flag uintptr, data string) error {
-	spec := C.CString(device)
-	dir := C.CString(target)
-	fstype := C.CString(mType)
-	_, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0)
-	C.free(unsafe.Pointer(spec))
-	C.free(unsafe.Pointer(dir))
-	C.free(unsafe.Pointer(fstype))
-	return err
-}
-
-func unmount(target string, flag int) error {
-	err := unix.Unmount(target, flag)
-	return err
-}
diff --git a/pkg/mount/mounter_unsupported.go b/pkg/mount/mounter_unsupported.go
index a2a3bb4..eb93365 100644
--- a/pkg/mount/mounter_unsupported.go
+++ b/pkg/mount/mounter_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+// +build !linux,!freebsd freebsd,!cgo
 
 package mount
 
diff --git a/pkg/mount/mountinfo_solaris.go b/pkg/mount/mountinfo_solaris.go
deleted file mode 100644
index ad9ab57..0000000
--- a/pkg/mount/mountinfo_solaris.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build solaris,cgo
-
-package mount
-
-/*
-#include <stdio.h>
-#include <sys/mnttab.h>
-*/
-import "C"
-
-import (
-	"fmt"
-)
-
-func parseMountTable() ([]*Info, error) {
-	mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r"))
-	if mnttab == nil {
-		return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
-	}
-
-	var out []*Info
-	var mp C.struct_mnttab
-
-	ret := C.getmntent(mnttab, &mp)
-	for ret == 0 {
-		var mountinfo Info
-		mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
-		mountinfo.Source = C.GoString(mp.mnt_special)
-		mountinfo.Fstype = C.GoString(mp.mnt_fstype)
-		mountinfo.Opts = C.GoString(mp.mnt_mntopts)
-		out = append(out, &mountinfo)
-		ret = C.getmntent(mnttab, &mp)
-	}
-
-	C.fclose(mnttab)
-	return out, nil
-}
diff --git a/pkg/mount/mountinfo_unsupported.go b/pkg/mount/mountinfo_unsupported.go
index 7fbcf19..b8d9aa5 100644
--- a/pkg/mount/mountinfo_unsupported.go
+++ b/pkg/mount/mountinfo_unsupported.go
@@ -1,4 +1,4 @@
-// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+// +build !windows,!linux,!freebsd freebsd,!cgo
 
 package mount
 
diff --git a/pkg/mount/sharedsubtree_solaris.go b/pkg/mount/sharedsubtree_solaris.go
deleted file mode 100644
index 09f6b03..0000000
--- a/pkg/mount/sharedsubtree_solaris.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// +build solaris
-
-package mount
-
-// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
-// See the supported options in flags.go for further reference.
-func MakeShared(mountPoint string) error {
-	return ensureMountedAs(mountPoint, "shared")
-}
-
-// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
-// See the supported options in flags.go for further reference.
-func MakeRShared(mountPoint string) error {
-	return ensureMountedAs(mountPoint, "rshared")
-}
-
-// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
-// See the supported options in flags.go for further reference.
-func MakePrivate(mountPoint string) error {
-	return ensureMountedAs(mountPoint, "private")
-}
-
-// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
-// enabled. See the supported options in flags.go for further reference.
-func MakeRPrivate(mountPoint string) error {
-	return ensureMountedAs(mountPoint, "rprivate")
-}
-
-// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
-// See the supported options in flags.go for further reference.
-func MakeSlave(mountPoint string) error {
-	return ensureMountedAs(mountPoint, "slave")
-}
-
-// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
-// See the supported options in flags.go for further reference.
-func MakeRSlave(mountPoint string) error {
-	return ensureMountedAs(mountPoint, "rslave")
-}
-
-// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
-// enabled. See the supported options in flags.go for further reference.
-func MakeUnbindable(mountPoint string) error {
-	return ensureMountedAs(mountPoint, "unbindable")
-}
-
-// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
-// option enabled. See the supported options in flags.go for further reference.
-func MakeRUnbindable(mountPoint string) error {
-	return ensureMountedAs(mountPoint, "runbindable")
-}
-
-func ensureMountedAs(mountPoint, options string) error {
-	// TODO: Solaris does not support bind mounts.
-	// Evaluate lofs and also look at the relevant
-	// mount flags to be supported.
-	return nil
-}
diff --git a/pkg/namesgenerator/cmd/names-generator/main.go b/pkg/namesgenerator/cmd/names-generator/main.go
index 18a939b..7fd5955 100644
--- a/pkg/namesgenerator/cmd/names-generator/main.go
+++ b/pkg/namesgenerator/cmd/names-generator/main.go
@@ -2,10 +2,13 @@
 
 import (
 	"fmt"
+	"math/rand"
+	"time"
 
 	"github.com/docker/docker/pkg/namesgenerator"
 )
 
 func main() {
+	rand.Seed(time.Now().UnixNano())
 	fmt.Println(namesgenerator.GetRandomName(0))
 }
diff --git a/pkg/parsers/kernel/kernel_unix.go b/pkg/parsers/kernel/kernel_unix.go
index 76e1e49..767ede2 100644
--- a/pkg/parsers/kernel/kernel_unix.go
+++ b/pkg/parsers/kernel/kernel_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris openbsd
+// +build linux freebsd openbsd
 
 // Package kernel provides helper function to get, parse and compare kernel
 // versions for different platforms.
@@ -17,18 +17,8 @@
 		return nil, err
 	}
 
-	release := make([]byte, len(uts.Release))
-
-	i := 0
-	for _, c := range uts.Release {
-		release[i] = byte(c)
-		i++
-	}
-
 	// Remove the \x00 from the release for Atoi to parse correctly
-	release = release[:bytes.IndexByte(release, 0)]
-
-	return ParseRelease(string(release))
+	return ParseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)]))
 }
 
 // CheckKernelVersion checks if current kernel is newer than (or equal to)
diff --git a/pkg/parsers/kernel/uname_unsupported.go b/pkg/parsers/kernel/uname_unsupported.go
index 1da3f23..79c66b3 100644
--- a/pkg/parsers/kernel/uname_unsupported.go
+++ b/pkg/parsers/kernel/uname_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!solaris
+// +build !linux
 
 package kernel
 
diff --git a/pkg/parsers/operatingsystem/operatingsystem_solaris.go b/pkg/parsers/operatingsystem/operatingsystem_solaris.go
deleted file mode 100644
index d08ad14..0000000
--- a/pkg/parsers/operatingsystem/operatingsystem_solaris.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build solaris,cgo
-
-package operatingsystem
-
-/*
-#include <zone.h>
-*/
-import "C"
-
-import (
-	"bytes"
-	"errors"
-	"io/ioutil"
-)
-
-var etcOsRelease = "/etc/release"
-
-// GetOperatingSystem gets the name of the current operating system.
-func GetOperatingSystem() (string, error) {
-	b, err := ioutil.ReadFile(etcOsRelease)
-	if err != nil {
-		return "", err
-	}
-	if i := bytes.Index(b, []byte("\n")); i >= 0 {
-		b = bytes.Trim(b[:i], " ")
-		return string(b), nil
-	}
-	return "", errors.New("release not found")
-}
-
-// IsContainerized returns true if we are running inside a container.
-func IsContainerized() (bool, error) {
-	if C.getzoneid() != 0 {
-		return true, nil
-	}
-	return false, nil
-}
diff --git a/pkg/platform/architecture_linux.go b/pkg/platform/architecture_linux.go
index 061127c..55c38b2 100644
--- a/pkg/platform/architecture_linux.go
+++ b/pkg/platform/architecture_linux.go
@@ -3,6 +3,8 @@
 package platform
 
 import (
+	"bytes"
+
 	"golang.org/x/sys/unix"
 )
 
@@ -12,5 +14,5 @@
 	if err := unix.Uname(utsname); err != nil {
 		return "", err
 	}
-	return charsToString(utsname.Machine), nil
+	return string(utsname.Machine[:bytes.IndexByte(utsname.Machine[:], 0)]), nil
 }
diff --git a/pkg/platform/architecture_unix.go b/pkg/platform/architecture_unix.go
index 45bbcf1..8db9086 100644
--- a/pkg/platform/architecture_unix.go
+++ b/pkg/platform/architecture_unix.go
@@ -1,4 +1,4 @@
-// +build freebsd solaris darwin
+// +build freebsd darwin
 
 // Package platform provides helper function to get the runtime architecture
 // for different platforms.
diff --git a/pkg/platform/utsname_int8.go b/pkg/platform/utsname_int8.go
deleted file mode 100644
index a948873..0000000
--- a/pkg/platform/utsname_int8.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build linux,386 linux,amd64 linux,arm64 s390x
-// see golang's sources golang.org/x/sys/unix/ztypes_linux_*.go that use int8
-
-package platform
-
-// Convert the OS/ARCH-specific utsname.Machine to string
-// given as an array of signed int8
-func charsToString(ca [65]int8) string {
-	s := make([]byte, len(ca))
-	var lens int
-	for ; lens < len(ca); lens++ {
-		if ca[lens] == 0 {
-			break
-		}
-		s[lens] = uint8(ca[lens])
-	}
-	return string(s[0:lens])
-}
diff --git a/pkg/platform/utsname_int8_test.go b/pkg/platform/utsname_int8_test.go
deleted file mode 100644
index 7cd7208..0000000
--- a/pkg/platform/utsname_int8_test.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build linux,386 linux,amd64 linux,arm64 s390x
-
-package platform
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestCharToString(t *testing.T) {
-	machineInBytes := [65]int8{120, 56, 54, 95, 54, 52}
-	machineInString := charsToString(machineInBytes)
-	assert.NotNil(t, machineInString, "Unable to convert char into string.")
-	assert.Equal(t, string("x86_64"), machineInString, "Parsed machine code not equal.")
-}
diff --git a/pkg/platform/utsname_uint8.go b/pkg/platform/utsname_uint8.go
deleted file mode 100644
index b4af7a5..0000000
--- a/pkg/platform/utsname_uint8.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build linux,arm linux,ppc64 linux,ppc64le
-// see golang's sources golang.org/x/sys/unix/ztypes_linux_*.go that use uint8
-
-package platform
-
-// Convert the OS/ARCH-specific utsname.Machine to string
-// given as an array of unsigned uint8
-func charsToString(ca [65]uint8) string {
-	s := make([]byte, len(ca))
-	var lens int
-	for ; lens < len(ca); lens++ {
-		if ca[lens] == 0 {
-			break
-		}
-		s[lens] = ca[lens]
-	}
-	return string(s[0:lens])
-}
diff --git a/pkg/platform/utsname_uint8_test.go b/pkg/platform/utsname_uint8_test.go
deleted file mode 100644
index 835eaa9..0000000
--- a/pkg/platform/utsname_uint8_test.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build linux,arm linux,ppc64 linux,ppc64le
-
-package platform
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-)
-
-func TestTestCharToString(t *testing.T) {
-	machineInBytes := [65]uint8{120, 56, 54, 95, 54, 52}
-	machineInString := charsToString(machineInBytes)
-	assert.NotNil(t, machineInString, "Unable to convert char into string.")
-	assert.Equal(t, string("x86_64"), machineInString, "Parsed machine code not equal.")
-}
diff --git a/pkg/plugingetter/getter.go b/pkg/plugingetter/getter.go
index b04b7bc..b9ffa54 100644
--- a/pkg/plugingetter/getter.go
+++ b/pkg/plugingetter/getter.go
@@ -1,6 +1,8 @@
 package plugingetter
 
-import "github.com/docker/docker/pkg/plugins"
+import (
+	"github.com/docker/docker/pkg/plugins"
+)
 
 const (
 	// Lookup doesn't update RefCount
diff --git a/pkg/plugins/discovery_unix_test.go b/pkg/plugins/discovery_unix_test.go
index 66f5035..e4d156d 100644
--- a/pkg/plugins/discovery_unix_test.go
+++ b/pkg/plugins/discovery_unix_test.go
@@ -10,6 +10,8 @@
 	"path/filepath"
 	"reflect"
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )
 
 func TestLocalSocket(t *testing.T) {
@@ -89,6 +91,7 @@
 
 	r := newLocalRegistry()
 	p, err := r.Plugin(name)
+	require.NoError(t, err)
 
 	pluginNamesNotEmpty, err := Scan()
 	if err != nil {
diff --git a/pkg/pools/pools_test.go b/pkg/pools/pools_test.go
index d71cb99..544c499 100644
--- a/pkg/pools/pools_test.go
+++ b/pkg/pools/pools_test.go
@@ -6,6 +6,9 @@
 	"io"
 	"strings"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) {
@@ -92,22 +95,16 @@
 	buf := new(bytes.Buffer)
 	bw := bufio.NewWriter(buf)
 	writer := BufioWriter32KPool.Get(bw)
-	if writer == nil {
-		t.Fatalf("BufioReaderPool should not return a nil writer.")
-	}
+	require.NotNil(t, writer)
+
 	written, err := writer.Write([]byte("foobar"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	if written != 6 {
-		t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written)
-	}
+	require.NoError(t, err)
+	assert.Equal(t, 6, written)
+
 	// Make sure we Flush all the way ?
 	writer.Flush()
 	bw.Flush()
-	if len(buf.Bytes()) != 6 {
-		t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes()))
-	}
+	assert.Len(t, buf.Bytes(), 6)
 	// Reset the buffer
 	buf.Reset()
 	BufioWriter32KPool.Put(writer)
diff --git a/pkg/promise/promise.go b/pkg/promise/promise.go
deleted file mode 100644
index dd52b90..0000000
--- a/pkg/promise/promise.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package promise
-
-// Go is a basic promise implementation: it wraps calls a function in a goroutine,
-// and returns a channel which will later return the function's return value.
-func Go(f func() error) chan error {
-	ch := make(chan error, 1)
-	go func() {
-		ch <- f()
-	}()
-	return ch
-}
diff --git a/pkg/promise/promise_test.go b/pkg/promise/promise_test.go
deleted file mode 100644
index 287213b..0000000
--- a/pkg/promise/promise_test.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package promise
-
-import (
-	"errors"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func TestGo(t *testing.T) {
-	errCh := Go(functionWithError)
-	er := <-errCh
-	require.EqualValues(t, "Error Occurred", er.Error())
-
-	noErrCh := Go(functionWithNoError)
-	er = <-noErrCh
-	require.Nil(t, er)
-}
-
-func functionWithError() (err error) {
-	return errors.New("Error Occurred")
-}
-func functionWithNoError() (err error) {
-	return nil
-}
diff --git a/pkg/reexec/command_unix.go b/pkg/reexec/command_unix.go
index 778a720..55c0c97 100644
--- a/pkg/reexec/command_unix.go
+++ b/pkg/reexec/command_unix.go
@@ -1,4 +1,4 @@
-// +build freebsd solaris darwin
+// +build freebsd darwin
 
 package reexec
 
diff --git a/pkg/reexec/command_unsupported.go b/pkg/reexec/command_unsupported.go
index 76edd82..6f5e55d 100644
--- a/pkg/reexec/command_unsupported.go
+++ b/pkg/reexec/command_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows,!freebsd,!solaris,!darwin
+// +build !linux,!windows,!freebsd,!darwin
 
 package reexec
 
@@ -6,7 +6,7 @@
 	"os/exec"
 )
 
-// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin.
+// Command is unsupported on operating systems apart from Linux, Windows, FreeBSD, and Darwin.
 func Command(args ...string) *exec.Cmd {
 	return nil
 }
diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go
index 3594796..66c85c8 100644
--- a/pkg/signal/signal_linux.go
+++ b/pkg/signal/signal_linux.go
@@ -40,7 +40,6 @@
 	"TSTP":     unix.SIGTSTP,
 	"TTIN":     unix.SIGTTIN,
 	"TTOU":     unix.SIGTTOU,
-	"UNUSED":   unix.SIGUNUSED,
 	"URG":      unix.SIGURG,
 	"USR1":     unix.SIGUSR1,
 	"USR2":     unix.SIGUSR2,
diff --git a/pkg/signal/signal_linux_test.go b/pkg/signal/signal_linux_test.go
index 32c056fe..8dc913b 100644
--- a/pkg/signal/signal_linux_test.go
+++ b/pkg/signal/signal_linux_test.go
@@ -1,4 +1,4 @@
-// +build darwin linux solaris
+// +build darwin linux
 
 package signal
 
@@ -41,7 +41,7 @@
 }
 
 func TestStopCatch(t *testing.T) {
-	signal, _ := SignalMap["HUP"]
+	signal := SignalMap["HUP"]
 	channel := make(chan os.Signal, 1)
 	CatchAll(channel)
 	go func() {
diff --git a/pkg/signal/signal_solaris.go b/pkg/signal/signal_solaris.go
deleted file mode 100644
index 89576b9..0000000
--- a/pkg/signal/signal_solaris.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package signal
-
-import (
-	"syscall"
-)
-
-// SignalMap is a map of Solaris signals.
-// SIGINFO and SIGTHR not defined for Solaris
-var SignalMap = map[string]syscall.Signal{
-	"ABRT":   syscall.SIGABRT,
-	"ALRM":   syscall.SIGALRM,
-	"BUF":    syscall.SIGBUS,
-	"CHLD":   syscall.SIGCHLD,
-	"CONT":   syscall.SIGCONT,
-	"EMT":    syscall.SIGEMT,
-	"FPE":    syscall.SIGFPE,
-	"HUP":    syscall.SIGHUP,
-	"ILL":    syscall.SIGILL,
-	"INT":    syscall.SIGINT,
-	"IO":     syscall.SIGIO,
-	"IOT":    syscall.SIGIOT,
-	"KILL":   syscall.SIGKILL,
-	"LWP":    syscall.SIGLWP,
-	"PIPE":   syscall.SIGPIPE,
-	"PROF":   syscall.SIGPROF,
-	"QUIT":   syscall.SIGQUIT,
-	"SEGV":   syscall.SIGSEGV,
-	"STOP":   syscall.SIGSTOP,
-	"SYS":    syscall.SIGSYS,
-	"TERM":   syscall.SIGTERM,
-	"TRAP":   syscall.SIGTRAP,
-	"TSTP":   syscall.SIGTSTP,
-	"TTIN":   syscall.SIGTTIN,
-	"TTOU":   syscall.SIGTTOU,
-	"URG":    syscall.SIGURG,
-	"USR1":   syscall.SIGUSR1,
-	"USR2":   syscall.SIGUSR2,
-	"VTALRM": syscall.SIGVTALRM,
-	"WINCH":  syscall.SIGWINCH,
-	"XCPU":   syscall.SIGXCPU,
-	"XFSZ":   syscall.SIGXFSZ,
-}
diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go
index c592d37..161ba27 100644
--- a/pkg/signal/signal_unsupported.go
+++ b/pkg/signal/signal_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!darwin,!freebsd,!windows,!solaris
+// +build !linux,!darwin,!freebsd,!windows
 
 package signal
 
diff --git a/pkg/stringutils/stringutils.go b/pkg/stringutils/stringutils.go
index 8c4c398..b294de2 100644
--- a/pkg/stringutils/stringutils.go
+++ b/pkg/stringutils/stringutils.go
@@ -7,17 +7,6 @@
 	"strings"
 )
 
-// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n.
-func GenerateRandomAlphaOnlyString(n int) string {
-	// make a really long string
-	letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
-	b := make([]byte, n)
-	for i := range b {
-		b[i] = letters[rand.Intn(len(letters))]
-	}
-	return string(b)
-}
-
 // GenerateRandomASCIIString generates an ASCII random string with length n.
 func GenerateRandomASCIIString(n int) string {
 	chars := "abcdefghijklmnopqrstuvwxyz" +
diff --git a/pkg/stringutils/stringutils_test.go b/pkg/stringutils/stringutils_test.go
index 8af2bdc..15b3cf8 100644
--- a/pkg/stringutils/stringutils_test.go
+++ b/pkg/stringutils/stringutils_test.go
@@ -34,14 +34,6 @@
 	return true
 }
 
-func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) {
-	testLengthHelper(GenerateRandomAlphaOnlyString, t)
-}
-
-func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) {
-	testUniquenessHelper(GenerateRandomAlphaOnlyString, t)
-}
-
 func TestGenerateRandomAsciiStringLength(t *testing.T) {
 	testLengthHelper(GenerateRandomASCIIString, t)
 }
diff --git a/pkg/sysinfo/sysinfo_solaris.go b/pkg/sysinfo/sysinfo_solaris.go
deleted file mode 100644
index c858d57..0000000
--- a/pkg/sysinfo/sysinfo_solaris.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// +build solaris,cgo
-
-package sysinfo
-
-import (
-	"bytes"
-	"os/exec"
-	"strconv"
-	"strings"
-)
-
-/*
-#cgo LDFLAGS: -llgrp
-#include <unistd.h>
-#include <stdlib.h>
-#include <sys/lgrp_user.h>
-int getLgrpCount() {
-	lgrp_cookie_t lgrpcookie = LGRP_COOKIE_NONE;
-	uint_t nlgrps;
-
-	if ((lgrpcookie = lgrp_init(LGRP_VIEW_OS)) == LGRP_COOKIE_NONE) {
-		return -1;
-	}
-	nlgrps = lgrp_nlgrps(lgrpcookie);
-	return nlgrps;
-}
-*/
-import "C"
-
-// IsCPUSharesAvailable returns whether CPUShares setting is supported.
-// We need FSS to be set as default scheduling class to support CPU Shares
-func IsCPUSharesAvailable() bool {
-	cmd := exec.Command("/usr/sbin/dispadmin", "-d")
-	outBuf := new(bytes.Buffer)
-	errBuf := new(bytes.Buffer)
-	cmd.Stderr = errBuf
-	cmd.Stdout = outBuf
-
-	if err := cmd.Run(); err != nil {
-		return false
-	}
-	return (strings.Contains(outBuf.String(), "FSS"))
-}
-
-// New returns a new SysInfo, using the filesystem to detect which features
-// the kernel supports.
-//NOTE Solaris: If we change the below capabilities be sure
-// to update verifyPlatformContainerSettings() in daemon_solaris.go
-func New(quiet bool) *SysInfo {
-	sysInfo := &SysInfo{}
-	sysInfo.cgroupMemInfo = setCgroupMem(quiet)
-	sysInfo.cgroupCPUInfo = setCgroupCPU(quiet)
-	sysInfo.cgroupBlkioInfo = setCgroupBlkioInfo(quiet)
-	sysInfo.cgroupCpusetInfo = setCgroupCPUsetInfo(quiet)
-
-	sysInfo.IPv4ForwardingDisabled = false
-
-	sysInfo.AppArmor = false
-
-	return sysInfo
-}
-
-// setCgroupMem reads the memory information for Solaris.
-func setCgroupMem(quiet bool) cgroupMemInfo {
-
-	return cgroupMemInfo{
-		MemoryLimit:       true,
-		SwapLimit:         true,
-		MemoryReservation: false,
-		OomKillDisable:    false,
-		MemorySwappiness:  false,
-		KernelMemory:      false,
-	}
-}
-
-// setCgroupCPU reads the cpu information for Solaris.
-func setCgroupCPU(quiet bool) cgroupCPUInfo {
-
-	return cgroupCPUInfo{
-		CPUShares:          true,
-		CPUCfsPeriod:       false,
-		CPUCfsQuota:        true,
-		CPURealtimePeriod:  false,
-		CPURealtimeRuntime: false,
-	}
-}
-
-// blkio switches are not supported in Solaris.
-func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo {
-
-	return cgroupBlkioInfo{
-		BlkioWeight:       false,
-		BlkioWeightDevice: false,
-	}
-}
-
-// setCgroupCPUsetInfo reads the cpuset information for Solaris.
-func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo {
-
-	return cgroupCpusetInfo{
-		Cpuset: true,
-		Cpus:   getCPUCount(),
-		Mems:   getLgrpCount(),
-	}
-}
-
-func getCPUCount() string {
-	ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN)
-	if ncpus <= 0 {
-		return ""
-	}
-	return strconv.FormatInt(int64(ncpus), 16)
-}
-
-func getLgrpCount() string {
-	nlgrps := C.getLgrpCount()
-	if nlgrps <= 0 {
-		return ""
-	}
-	return strconv.FormatInt(int64(nlgrps), 16)
-}
diff --git a/pkg/sysinfo/sysinfo_unix.go b/pkg/sysinfo/sysinfo_unix.go
index 45f3ef1..beac328 100644
--- a/pkg/sysinfo/sysinfo_unix.go
+++ b/pkg/sysinfo/sysinfo_unix.go
@@ -1,8 +1,8 @@
-// +build !linux,!solaris,!windows
+// +build !linux,!windows
 
 package sysinfo
 
-// New returns an empty SysInfo for non linux nor solaris for now.
+// New returns an empty SysInfo for non-Linux platforms for now.
 func New(quiet bool) *SysInfo {
 	sysInfo := &SysInfo{}
 	return sysInfo
diff --git a/pkg/system/exitcode.go b/pkg/system/exitcode.go
index 60f0514..a5e5616 100644
--- a/pkg/system/exitcode.go
+++ b/pkg/system/exitcode.go
@@ -17,17 +17,3 @@
 	}
 	return exitCode, fmt.Errorf("failed to get exit code")
 }
-
-// ProcessExitCode process the specified error and returns the exit status code
-// if the error was of type exec.ExitError, returns nothing otherwise.
-func ProcessExitCode(err error) (exitCode int) {
-	if err != nil {
-		var exiterr error
-		if exitCode, exiterr = GetExitCode(err); exiterr != nil {
-			// TODO: Fix this so we check the error's text.
-			// we've failed to retrieve exit code, so we set it to 127
-			exitCode = 127
-		}
-	}
-	return
-}
diff --git a/pkg/system/init_unix.go b/pkg/system/init_unix.go
new file mode 100644
index 0000000..a219895
--- /dev/null
+++ b/pkg/system/init_unix.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package system
+
+// InitLCOW does nothing since LCOW is a Windows-only feature
+func InitLCOW(experimental bool) {
+}
diff --git a/pkg/system/init_windows.go b/pkg/system/init_windows.go
index 019c664..75f8f2c 100644
--- a/pkg/system/init_windows.go
+++ b/pkg/system/init_windows.go
@@ -2,16 +2,16 @@
 
 import "os"
 
-// LCOWSupported determines if Linux Containers on Windows are supported.
-// Note: This feature is in development (06/17) and enabled through an
-// environment variable. At a future time, it will be enabled based
-// on build number. @jhowardmsft
+// lcowSupported determines if Linux Containers on Windows are supported.
 var lcowSupported = false
 
-func init() {
-	// LCOW initialization
-	if os.Getenv("LCOW_SUPPORTED") != "" {
+// InitLCOW sets whether LCOW is supported or not
+// TODO @jhowardmsft.
+// 1. Replace with RS3 RTM build number.
+// 2. Remove the getenv check when image-store is coalesced as shouldn't be needed anymore.
+func InitLCOW(experimental bool) {
+	v := GetOSVersion()
+	if experimental && v.Build > 16270 && os.Getenv("LCOW_SUPPORTED") != "" {
 		lcowSupported = true
 	}
-
 }
diff --git a/pkg/system/lcow.go b/pkg/system/lcow.go
new file mode 100644
index 0000000..b88c11e
--- /dev/null
+++ b/pkg/system/lcow.go
@@ -0,0 +1,58 @@
+package system
+
+import (
+	"fmt"
+	"runtime"
+	"strings"
+
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ValidatePlatform determines if a platform structure is valid.
+// TODO This is a temporary function - can be replaced by parsing from
+// https://github.com/containerd/containerd/pull/1403/files at a later date.
+// @jhowardmsft
+func ValidatePlatform(platform *specs.Platform) error {
+	platform.Architecture = strings.ToLower(platform.Architecture)
+	platform.OS = strings.ToLower(platform.OS)
+	// Based on https://github.com/moby/moby/pull/34642#issuecomment-330375350, do
+	// not support anything except operating system.
+	if platform.Architecture != "" {
+		return fmt.Errorf("invalid platform architecture %q", platform.Architecture)
+	}
+	if platform.OS != "" {
+		if !(platform.OS == runtime.GOOS || (LCOWSupported() && platform.OS == "linux")) {
+			return fmt.Errorf("invalid platform os %q", platform.OS)
+		}
+	}
+	if len(platform.OSFeatures) != 0 {
+		return fmt.Errorf("invalid platform osfeatures %q", platform.OSFeatures)
+	}
+	if platform.OSVersion != "" {
+		return fmt.Errorf("invalid platform osversion %q", platform.OSVersion)
+	}
+	if platform.Variant != "" {
+		return fmt.Errorf("invalid platform variant %q", platform.Variant)
+	}
+	return nil
+}
+
+// ParsePlatform parses a platform string in the format os[/arch[/variant]]
+// into an OCI image-spec platform structure.
+// TODO This is a temporary function - can be replaced by parsing from
+// https://github.com/containerd/containerd/pull/1403/files at a later date.
+// @jhowardmsft
+func ParsePlatform(in string) *specs.Platform {
+	p := &specs.Platform{}
+	elements := strings.SplitN(strings.ToLower(in), "/", 3)
+	if len(elements) == 3 {
+		p.Variant = elements[2]
+	}
+	if len(elements) >= 2 {
+		p.Architecture = elements[1]
+	}
+	if len(elements) >= 1 {
+		p.OS = elements[0]
+	}
+	return p
+}
diff --git a/pkg/system/meminfo_solaris.go b/pkg/system/meminfo_solaris.go
deleted file mode 100644
index 925776e..0000000
--- a/pkg/system/meminfo_solaris.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// +build solaris,cgo
-
-package system
-
-import (
-	"fmt"
-	"unsafe"
-)
-
-// #cgo CFLAGS: -std=c99
-// #cgo LDFLAGS: -lkstat
-// #include <unistd.h>
-// #include <stdlib.h>
-// #include <stdio.h>
-// #include <kstat.h>
-// #include <sys/swap.h>
-// #include <sys/param.h>
-// struct swaptable *allocSwaptable(int num) {
-//	struct swaptable *st;
-//	struct swapent *swapent;
-// 	st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
-//	swapent = st->swt_ent;
-//	for (int i = 0; i < num; i++,swapent++) {
-//		swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
-//	}
-//	st->swt_n = num;
-//	return st;
-//}
-// void freeSwaptable (struct swaptable *st) {
-//	struct swapent *swapent = st->swt_ent;
-//	for (int i = 0; i < st->swt_n; i++,swapent++) {
-//		free(swapent->ste_path);
-//	}
-//	free(st);
-// }
-// swapent_t getSwapEnt(swapent_t *ent, int i) {
-//	return ent[i];
-// }
-// int64_t getPpKernel() {
-//	int64_t pp_kernel = 0;
-//	kstat_ctl_t *ksc;
-//	kstat_t *ks;
-//	kstat_named_t *knp;
-//	kid_t kid;
-//
-//	if ((ksc = kstat_open()) == NULL) {
-//		return -1;
-//	}
-//	if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
-//		return -1;
-//	}
-//	if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
-//	    ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
-//		return -1;
-//	}
-//	switch (knp->data_type) {
-//	case KSTAT_DATA_UINT64:
-//		pp_kernel = knp->value.ui64;
-//		break;
-//	case KSTAT_DATA_UINT32:
-//		pp_kernel = knp->value.ui32;
-//		break;
-//	}
-//	pp_kernel *= sysconf(_SC_PAGESIZE);
-//	return (pp_kernel > 0 ? pp_kernel : -1);
-// }
-import "C"
-
-// Get the system memory info using sysconf same as prtconf
-func getTotalMem() int64 {
-	pagesize := C.sysconf(C._SC_PAGESIZE)
-	npages := C.sysconf(C._SC_PHYS_PAGES)
-	return int64(pagesize * npages)
-}
-
-func getFreeMem() int64 {
-	pagesize := C.sysconf(C._SC_PAGESIZE)
-	npages := C.sysconf(C._SC_AVPHYS_PAGES)
-	return int64(pagesize * npages)
-}
-
-// ReadMemInfo retrieves memory statistics of the host system and returns a
-//  MemInfo type.
-func ReadMemInfo() (*MemInfo, error) {
-
-	ppKernel := C.getPpKernel()
-	MemTotal := getTotalMem()
-	MemFree := getFreeMem()
-	SwapTotal, SwapFree, err := getSysSwap()
-
-	if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
-		SwapFree < 0 {
-		return nil, fmt.Errorf("error getting system memory info %v\n", err)
-	}
-
-	meminfo := &MemInfo{}
-	// Total memory is total physical memory less than memory locked by kernel
-	meminfo.MemTotal = MemTotal - int64(ppKernel)
-	meminfo.MemFree = MemFree
-	meminfo.SwapTotal = SwapTotal
-	meminfo.SwapFree = SwapFree
-
-	return meminfo, nil
-}
-
-func getSysSwap() (int64, int64, error) {
-	var tSwap int64
-	var fSwap int64
-	var diskblksPerPage int64
-	num, err := C.swapctl(C.SC_GETNSWP, nil)
-	if err != nil {
-		return -1, -1, err
-	}
-	st := C.allocSwaptable(num)
-	_, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
-	if err != nil {
-		C.freeSwaptable(st)
-		return -1, -1, err
-	}
-
-	diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
-	for i := 0; i < int(num); i++ {
-		swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
-		tSwap += int64(swapent.ste_pages) * diskblksPerPage
-		fSwap += int64(swapent.ste_free) * diskblksPerPage
-	}
-	C.freeSwaptable(st)
-	return tSwap, fSwap, nil
-}
diff --git a/pkg/system/meminfo_unsupported.go b/pkg/system/meminfo_unsupported.go
index 3ce019d..82ddd30 100644
--- a/pkg/system/meminfo_unsupported.go
+++ b/pkg/system/meminfo_unsupported.go
@@ -1,4 +1,4 @@
-// +build !linux,!windows,!solaris
+// +build !linux,!windows
 
 package system
 
diff --git a/pkg/system/mknod.go b/pkg/system/mknod.go
index af79a65..2200ec4 100644
--- a/pkg/system/mknod.go
+++ b/pkg/system/mknod.go
@@ -18,5 +18,5 @@
 // They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major,
 // then the top 12 bits of the minor.
 func Mkdev(major int64, minor int64) uint32 {
-	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
+	return uint32(unix.Mkdev(uint32(major), uint32(minor)))
 }
diff --git a/pkg/system/path.go b/pkg/system/path.go
index f634a6b..034c33c 100644
--- a/pkg/system/path.go
+++ b/pkg/system/path.go
@@ -1,15 +1,22 @@
 package system
 
-import "runtime"
+import (
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/containerd/continuity/pathdriver"
+)
 
 const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
 
 // DefaultPathEnv is unix style list of directories to search for
 // executables. Each directory is separated from the next by a colon
 // ':' character .
-func DefaultPathEnv(platform string) string {
+func DefaultPathEnv(os string) string {
 	if runtime.GOOS == "windows" {
-		if platform != runtime.GOOS && LCOWSupported() {
+		if os != runtime.GOOS {
 			return defaultUnixPathEnv
 		}
 		// Deliberately empty on Windows containers on Windows as the default path will be set by
@@ -19,3 +26,35 @@
 	return defaultUnixPathEnv
 
 }
+
+// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
+// is the system drive.
+// On Linux: this is a no-op.
+// On Windows: this does the following:
+// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
+// This is used, for example, when validating a user provided path in docker cp.
+// If a drive letter is supplied, it must be the system drive. The drive letter
+// is always removed. Also, it translates it to OS semantics (IOW / to \). We
+// need the path in this syntax so that it can ultimately be concatenated with
+// a Windows long-path which doesn't support drive-letters. Examples:
+// C:			--> Fail
+// C:\			--> \
+// a			--> a
+// /a			--> \a
+// d:\			--> Fail
+func CheckSystemDriveAndRemoveDriveLetter(path string, driver pathdriver.PathDriver) (string, error) {
+	if runtime.GOOS != "windows" || LCOWSupported() {
+		return path, nil
+	}
+
+	if len(path) == 2 && string(path[1]) == ":" {
+		return "", fmt.Errorf("No relative path specified in %q", path)
+	}
+	if !driver.IsAbs(path) || len(path) < 2 {
+		return filepath.FromSlash(path), nil
+	}
+	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
+		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
+	}
+	return filepath.FromSlash(path[2:]), nil
+}
diff --git a/pkg/system/path_unix.go b/pkg/system/path_unix.go
deleted file mode 100644
index f3762e6..0000000
--- a/pkg/system/path_unix.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !windows
-
-package system
-
-// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter,
-// is the system drive. This is a no-op on Linux.
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
-	return path, nil
-}
diff --git a/pkg/system/path_windows.go b/pkg/system/path_windows.go
deleted file mode 100644
index aab8915..0000000
--- a/pkg/system/path_windows.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// +build windows
-
-package system
-
-import (
-	"fmt"
-	"path/filepath"
-	"strings"
-)
-
-// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
-// This is used, for example, when validating a user provided path in docker cp.
-// If a drive letter is supplied, it must be the system drive. The drive letter
-// is always removed. Also, it translates it to OS semantics (IOW / to \). We
-// need the path in this syntax so that it can ultimately be concatenated with
-// a Windows long-path which doesn't support drive-letters. Examples:
-// C:			--> Fail
-// C:\			--> \
-// a			--> a
-// /a			--> \a
-// d:\			--> Fail
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
-	if len(path) == 2 && string(path[1]) == ":" {
-		return "", fmt.Errorf("No relative path specified in %q", path)
-	}
-	if !filepath.IsAbs(path) || len(path) < 2 {
-		return filepath.FromSlash(path), nil
-	}
-	if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
-		return "", fmt.Errorf("The specified path is not on the system drive (C:)")
-	}
-	return filepath.FromSlash(path[2:]), nil
-}
diff --git a/pkg/system/path_windows_test.go b/pkg/system/path_windows_test.go
index eccb26a..0e6bcab 100644
--- a/pkg/system/path_windows_test.go
+++ b/pkg/system/path_windows_test.go
@@ -2,18 +2,23 @@
 
 package system
 
-import "testing"
+import (
+	"testing"
+
+	"github.com/containerd/continuity/pathdriver"
+)
 
 // TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter
 func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
 	// Fails if not C drive.
-	path, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`)
+	_, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`, pathdriver.LocalPathDriver)
 	if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") {
 		t.Fatalf("Expected error for d:")
 	}
 
 	// Single character is unchanged
-	if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil {
+	var path string
+	if path, err = CheckSystemDriveAndRemoveDriveLetter("z", pathdriver.LocalPathDriver); err != nil {
 		t.Fatalf("Single character should pass")
 	}
 	if path != "z" {
@@ -21,7 +26,7 @@
 	}
 
 	// Two characters without colon is unchanged
-	if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil {
+	if path, err = CheckSystemDriveAndRemoveDriveLetter("AB", pathdriver.LocalPathDriver); err != nil {
 		t.Fatalf("2 characters without colon should pass")
 	}
 	if path != "AB" {
@@ -29,7 +34,7 @@
 	}
 
 	// Abs path without drive letter
-	if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil {
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`, pathdriver.LocalPathDriver); err != nil {
 		t.Fatalf("abs path no drive letter should pass")
 	}
 	if path != `\l` {
@@ -37,7 +42,7 @@
 	}
 
 	// Abs path without drive letter, linux style
-	if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil {
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`, pathdriver.LocalPathDriver); err != nil {
 		t.Fatalf("abs path no drive letter linux style should pass")
 	}
 	if path != `\l` {
@@ -45,7 +50,7 @@
 	}
 
 	// Drive-colon should be stripped
-	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil {
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`, pathdriver.LocalPathDriver); err != nil {
 		t.Fatalf("An absolute path should pass")
 	}
 	if path != `\` {
@@ -53,7 +58,7 @@
 	}
 
 	// Verify with a linux-style path
-	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil {
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`, pathdriver.LocalPathDriver); err != nil {
 		t.Fatalf("An absolute path should pass")
 	}
 	if path != `\` {
@@ -61,7 +66,7 @@
 	}
 
 	// Failure on c:
-	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil {
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`, pathdriver.LocalPathDriver); err == nil {
 		t.Fatalf("c: should fail")
 	}
 	if err.Error() != `No relative path specified in "c:"` {
@@ -69,7 +74,7 @@
 	}
 
 	// Failure on d:
-	if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil {
+	if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`, pathdriver.LocalPathDriver); err == nil {
 		t.Fatalf("c: should fail")
 	}
 	if err.Error() != `No relative path specified in "d:"` {
diff --git a/pkg/system/process_unix.go b/pkg/system/process_unix.go
index 26c8b42..02c1382 100644
--- a/pkg/system/process_unix.go
+++ b/pkg/system/process_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris darwin
+// +build linux freebsd darwin
 
 package system
 
diff --git a/pkg/system/process_windows.go b/pkg/system/process_windows.go
new file mode 100644
index 0000000..5973c46
--- /dev/null
+++ b/pkg/system/process_windows.go
@@ -0,0 +1,18 @@
+package system
+
+import "os"
+
+// IsProcessAlive returns true if process with a given pid is running.
+func IsProcessAlive(pid int) bool {
+	_, err := os.FindProcess(pid)
+
+	return err == nil
+}
+
+// KillProcess force-stops a process.
+func KillProcess(pid int) {
+	p, err := os.FindProcess(pid)
+	if err == nil {
+		p.Kill()
+	}
+}
diff --git a/pkg/system/rm.go b/pkg/system/rm.go
index 101b569..c453adc 100644
--- a/pkg/system/rm.go
+++ b/pkg/system/rm.go
@@ -26,7 +26,7 @@
 
 	// track retries
 	exitOnErr := make(map[string]int)
-	maxRetry := 5
+	maxRetry := 50
 
 	// Attempt to unmount anything beneath this dir first
 	mount.RecursiveUnmount(dir)
diff --git a/pkg/system/stat_unix_test.go b/pkg/system/stat_unix_test.go
index dee8d30..15c2e27 100644
--- a/pkg/system/stat_unix_test.go
+++ b/pkg/system/stat_unix_test.go
@@ -6,6 +6,8 @@
 	"os"
 	"syscall"
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )
 
 // TestFromStatT tests fromStatT for a tempfile
@@ -15,11 +17,10 @@
 
 	stat := &syscall.Stat_t{}
 	err := syscall.Lstat(file, stat)
+	require.NoError(t, err)
 
 	s, err := fromStatT(stat)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	if stat.Mode != s.Mode() {
 		t.Fatal("got invalid mode")
diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go
index 86df0e2..05f00d3 100644
--- a/pkg/tarsum/tarsum_test.go
+++ b/pkg/tarsum/tarsum_test.go
@@ -16,6 +16,9 @@
 	"os"
 	"strings"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 type testLayer struct {
@@ -222,17 +225,13 @@
 func TestEmptyTar(t *testing.T) {
 	// Test without gzip.
 	ts, err := emptyTarSum(false)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	zeroBlock := make([]byte, 1024)
 	buf := new(bytes.Buffer)
 
 	n, err := io.Copy(buf, ts)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 
 	if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) {
 		t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n)
@@ -247,19 +246,16 @@
 
 	// Test with gzip.
 	ts, err = emptyTarSum(true)
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	buf.Reset()
 
-	n, err = io.Copy(buf, ts)
-	if err != nil {
-		t.Fatal(err)
-	}
+	_, err = io.Copy(buf, ts)
+	require.NoError(t, err)
 
 	bufgz := new(bytes.Buffer)
 	gz := gzip.NewWriter(bufgz)
 	n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock))
+	require.NoError(t, err)
 	gz.Close()
 	gzBytes := bufgz.Bytes()
 
@@ -279,10 +275,7 @@
 	}
 
 	resultSum = ts.Sum(nil)
-
-	if resultSum != expectedSum {
-		t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum)
-	}
+	assert.Equal(t, expectedSum, resultSum)
 }
 
 var (
diff --git a/pkg/term/ascii_test.go b/pkg/term/ascii_test.go
index 4a1e7f3..5078cb7 100644
--- a/pkg/term/ascii_test.go
+++ b/pkg/term/ascii_test.go
@@ -1,43 +1,25 @@
 package term
 
-import "testing"
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
 
 func TestToBytes(t *testing.T) {
 	codes, err := ToBytes("ctrl-a,a")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(codes) != 2 {
-		t.Fatalf("Expected 2 codes, got %d", len(codes))
-	}
-	if codes[0] != 1 || codes[1] != 97 {
-		t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1])
-	}
+	require.NoError(t, err)
+	assert.Equal(t, []byte{1, 97}, codes)
 
-	codes, err = ToBytes("shift-z")
-	if err == nil {
-		t.Fatalf("Expected error, got none")
-	}
+	_, err = ToBytes("shift-z")
+	assert.Error(t, err)
 
 	codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(codes) != 4 {
-		t.Fatalf("Expected 4 codes, got %d", len(codes))
-	}
-	if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 {
-		t.Fatalf("Expected '0' '27' '126', '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3])
-	}
+	require.NoError(t, err)
+	assert.Equal(t, []byte{0, 27, 126, 15}, codes)
 
 	codes, err = ToBytes("DEL,+")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(codes) != 2 {
-		t.Fatalf("Expected 2 codes, got %d", len(codes))
-	}
-	if codes[0] != 127 || codes[1] != 43 {
-		t.Fatalf("Expected '127 '43'', got '%d' '%d'", codes[0], codes[1])
-	}
+	require.NoError(t, err)
+	assert.Equal(t, []byte{127, 43}, codes)
 }
diff --git a/pkg/term/tc.go b/pkg/term/tc.go
index 6d2dfd3..19dbb1c 100644
--- a/pkg/term/tc.go
+++ b/pkg/term/tc.go
@@ -1,5 +1,4 @@
 // +build !windows
-// +build !solaris !cgo
 
 package term
 
diff --git a/pkg/term/tc_solaris_cgo.go b/pkg/term/tc_solaris_cgo.go
deleted file mode 100644
index 50234af..0000000
--- a/pkg/term/tc_solaris_cgo.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// +build solaris,cgo
-
-package term
-
-import (
-	"syscall"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-// #include <termios.h>
-import "C"
-
-// Termios is the Unix API for terminal I/O.
-// It is passthrough for unix.Termios in order to make it portable with
-// other platforms where it is not available or handled differently.
-type Termios unix.Termios
-
-// MakeRaw put the terminal connected to the given file descriptor into raw
-// mode and returns the previous state of the terminal so that it can be
-// restored.
-func MakeRaw(fd uintptr) (*State, error) {
-	var oldState State
-	if err := tcget(fd, &oldState.termios); err != 0 {
-		return nil, err
-	}
-
-	newState := oldState.termios
-
-	newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON | unix.IXANY)
-	newState.Oflag &^= unix.OPOST
-	newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
-	newState.Cflag &^= (unix.CSIZE | unix.PARENB)
-	newState.Cflag |= unix.CS8
-
-	/*
-		VMIN is the minimum number of characters that needs to be read in non-canonical mode for it to be returned
-		Since VMIN is overloaded with another element in canonical mode when we switch modes it defaults to 4. It
-		needs to be explicitly set to 1.
-	*/
-	newState.Cc[C.VMIN] = 1
-	newState.Cc[C.VTIME] = 0
-
-	if err := tcset(fd, &newState); err != 0 {
-		return nil, err
-	}
-	return &oldState, nil
-}
-
-func tcget(fd uintptr, p *Termios) syscall.Errno {
-	ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p)))
-	if ret != 0 {
-		return err.(syscall.Errno)
-	}
-	return 0
-}
-
-func tcset(fd uintptr, p *Termios) syscall.Errno {
-	ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p)))
-	if ret != 0 {
-		return err.(syscall.Errno)
-	}
-	return 0
-}
diff --git a/pkg/term/term_linux_test.go b/pkg/term/term_linux_test.go
index f907ff5..0bb6f1c 100644
--- a/pkg/term/term_linux_test.go
+++ b/pkg/term/term_linux_test.go
@@ -68,6 +68,7 @@
 	require.Equal(t, inFd, tty.Fd())
 	require.Equal(t, isTerminal, true)
 	tmpFile, err := newTempFile()
+	require.NoError(t, err)
 	defer tmpFile.Close()
 	inFd, isTerminal = GetFdInfo(tmpFile)
 	require.Equal(t, inFd, tmpFile.Fd())
@@ -81,6 +82,7 @@
 	isTerminal := IsTerminal(tty.Fd())
 	require.Equal(t, isTerminal, true)
 	tmpFile, err := newTempFile()
+	require.NoError(t, err)
 	defer tmpFile.Close()
 	isTerminal = IsTerminal(tmpFile.Fd())
 	require.Equal(t, isTerminal, false)
@@ -94,6 +96,7 @@
 	require.NoError(t, err)
 	require.NotNil(t, state)
 	tty, err = newTtyForTest(t)
+	require.NoError(t, err)
 	defer tty.Close()
 	err = RestoreTerminal(tty.Fd(), state)
 	require.NoError(t, err)
diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go
index c0332c3..b6819b3 100644
--- a/pkg/term/term_windows.go
+++ b/pkg/term/term_windows.go
@@ -23,14 +23,7 @@
 	Width  uint16
 }
 
-const (
-	// https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx
-	enableVirtualTerminalInput      = 0x0200
-	enableVirtualTerminalProcessing = 0x0004
-	disableNewlineAutoReturn        = 0x0008
-)
-
-// vtInputSupported is true if enableVirtualTerminalInput is supported by the console
+// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console
 var vtInputSupported bool
 
 // StdStreams returns the standard streams (stdin, stdout, stderr).
@@ -40,8 +33,8 @@
 	var emulateStdin, emulateStdout, emulateStderr bool
 	fd := os.Stdin.Fd()
 	if mode, err := winterm.GetConsoleMode(fd); err == nil {
-		// Validate that enableVirtualTerminalInput is supported, but do not set it.
-		if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil {
+		// Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
+		if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil {
 			emulateStdin = true
 		} else {
 			vtInputSupported = true
@@ -53,21 +46,21 @@
 
 	fd = os.Stdout.Fd()
 	if mode, err := winterm.GetConsoleMode(fd); err == nil {
-		// Validate disableNewlineAutoReturn is supported, but do not set it.
-		if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+		// Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
+		if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
 			emulateStdout = true
 		} else {
-			winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+			winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
 		}
 	}
 
 	fd = os.Stderr.Fd()
 	if mode, err := winterm.GetConsoleMode(fd); err == nil {
-		// Validate disableNewlineAutoReturn is supported, but do not set it.
-		if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil {
+		// Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
+		if err = winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING|winterm.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
 			emulateStderr = true
 		} else {
-			winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing)
+			winterm.SetConsoleMode(fd, mode|winterm.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
 		}
 	}
 
@@ -183,9 +176,9 @@
 		return nil, err
 	}
 
-	// Ignore failures, since disableNewlineAutoReturn might not be supported on this
+	// Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this
 	// version of Windows.
-	winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn)
+	winterm.SetConsoleMode(fd, state.mode|winterm.DISABLE_NEWLINE_AUTO_RETURN)
 	return state, err
 }
 
@@ -215,7 +208,7 @@
 	mode |= winterm.ENABLE_INSERT_MODE
 	mode |= winterm.ENABLE_QUICK_EDIT_MODE
 	if vtInputSupported {
-		mode |= enableVirtualTerminalInput
+		mode |= winterm.ENABLE_VIRTUAL_TERMINAL_INPUT
 	}
 
 	err = winterm.SetConsoleMode(fd, mode)
diff --git a/pkg/term/winsize.go b/pkg/term/winsize.go
index 85c4d9d..1ef98d5 100644
--- a/pkg/term/winsize.go
+++ b/pkg/term/winsize.go
@@ -1,4 +1,4 @@
-// +build !solaris,!windows
+// +build !windows
 
 package term
 
diff --git a/pkg/term/winsize_solaris_cgo.go b/pkg/term/winsize_solaris_cgo.go
deleted file mode 100644
index 39c1d32..0000000
--- a/pkg/term/winsize_solaris_cgo.go
+++ /dev/null
@@ -1,42 +0,0 @@
-// +build solaris,cgo
-
-package term
-
-import (
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-/*
-#include <unistd.h>
-#include <stropts.h>
-#include <termios.h>
-
-// Small wrapper to get rid of variadic args of ioctl()
-int my_ioctl(int fd, int cmd, struct winsize *ws) {
-	return ioctl(fd, cmd, ws);
-}
-*/
-import "C"
-
-// GetWinsize returns the window size based on the specified file descriptor.
-func GetWinsize(fd uintptr) (*Winsize, error) {
-	ws := &Winsize{}
-	ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
-	// Skip retval = 0
-	if ret == 0 {
-		return ws, nil
-	}
-	return ws, err
-}
-
-// SetWinsize tries to set the specified window size for the specified file descriptor.
-func SetWinsize(fd uintptr, ws *Winsize) error {
-	ret, err := C.my_ioctl(C.int(fd), C.int(unix.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws)))
-	// Skip retval = 0
-	if ret == 0 {
-		return nil
-	}
-	return err
-}
diff --git a/pkg/tlsconfig/tlsconfig_clone.go b/pkg/tlsconfig/tlsconfig_clone.go
deleted file mode 100644
index e4dec3a..0000000
--- a/pkg/tlsconfig/tlsconfig_clone.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build go1.8
-
-package tlsconfig
-
-import "crypto/tls"
-
-// Clone returns a clone of tls.Config. This function is provided for
-// compatibility for go1.7 that doesn't include this method in stdlib.
-func Clone(c *tls.Config) *tls.Config {
-	return c.Clone()
-}
diff --git a/pkg/tlsconfig/tlsconfig_clone_go17.go b/pkg/tlsconfig/tlsconfig_clone_go17.go
deleted file mode 100644
index 0d5b448..0000000
--- a/pkg/tlsconfig/tlsconfig_clone_go17.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// +build go1.7,!go1.8
-
-package tlsconfig
-
-import "crypto/tls"
-
-// Clone returns a clone of tls.Config. This function is provided for
-// compatibility for go1.7 that doesn't include this method in stdlib.
-func Clone(c *tls.Config) *tls.Config {
-	return &tls.Config{
-		Rand:                        c.Rand,
-		Time:                        c.Time,
-		Certificates:                c.Certificates,
-		NameToCertificate:           c.NameToCertificate,
-		GetCertificate:              c.GetCertificate,
-		RootCAs:                     c.RootCAs,
-		NextProtos:                  c.NextProtos,
-		ServerName:                  c.ServerName,
-		ClientAuth:                  c.ClientAuth,
-		ClientCAs:                   c.ClientCAs,
-		InsecureSkipVerify:          c.InsecureSkipVerify,
-		CipherSuites:                c.CipherSuites,
-		PreferServerCipherSuites:    c.PreferServerCipherSuites,
-		SessionTicketsDisabled:      c.SessionTicketsDisabled,
-		SessionTicketKey:            c.SessionTicketKey,
-		ClientSessionCache:          c.ClientSessionCache,
-		MinVersion:                  c.MinVersion,
-		MaxVersion:                  c.MaxVersion,
-		CurvePreferences:            c.CurvePreferences,
-		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
-		Renegotiation:               c.Renegotiation,
-	}
-}
diff --git a/plugin/backend_linux.go b/plugin/backend_linux.go
index 8a31e97..28a6c18 100644
--- a/plugin/backend_linux.go
+++ b/plugin/backend_linux.go
@@ -146,7 +146,7 @@
 	return s.config, nil
 }
 
-func (s *tempConfigStore) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) {
+func (s *tempConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) {
 	return configToRootFS(c)
 }
 
@@ -365,7 +365,7 @@
 
 	enabledOnly := false
 	disabledOnly := false
-	if pluginFilters.Include("enabled") {
+	if pluginFilters.Contains("enabled") {
 		if pluginFilters.ExactMatch("enabled", "true") {
 			enabledOnly = true
 		} else if pluginFilters.ExactMatch("enabled", "false") {
@@ -386,7 +386,7 @@
 		if disabledOnly && p.PluginObj.Enabled {
 			continue
 		}
-		if pluginFilters.Include("capability") {
+		if pluginFilters.Contains("capability") {
 			for _, f := range p.GetTypes() {
 				if !pluginFilters.Match("capability", f.Capability) {
 					continue next
@@ -533,7 +533,7 @@
 	return ioutil.ReadAll(rwc)
 }
 
-func (s *pluginConfigStore) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) {
+func (s *pluginConfigStore) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) {
 	return configToRootFS(c)
 }
 
diff --git a/plugin/blobstore.go b/plugin/blobstore.go
index c1259b6..1f358e6 100644
--- a/plugin/blobstore.go
+++ b/plugin/blobstore.go
@@ -126,7 +126,7 @@
 	configDigest digest.Digest
 }
 
-func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, platform layer.Platform, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
+func (dm *downloadManager) Download(ctx context.Context, initialRootFS image.RootFS, os layer.OS, layers []xfer.DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) {
 	// TODO @jhowardmsft LCOW: May need revisiting.
 	for _, l := range layers {
 		b, err := dm.blobStore.New()
@@ -179,6 +179,6 @@
 func (dm *downloadManager) Get(d digest.Digest) ([]byte, error) {
 	return nil, fmt.Errorf("digest not found")
 }
-func (dm *downloadManager) RootFSAndPlatformFromConfig(c []byte) (*image.RootFS, layer.Platform, error) {
+func (dm *downloadManager) RootFSAndOSFromConfig(c []byte) (*image.RootFS, layer.OS, error) {
 	return configToRootFS(c)
 }
diff --git a/plugin/executor/containerd/containerd.go b/plugin/executor/containerd/containerd.go
new file mode 100644
index 0000000..d93b8b7
--- /dev/null
+++ b/plugin/executor/containerd/containerd.go
@@ -0,0 +1,146 @@
+package containerd
+
+import (
+	"context"
+	"io"
+	"path/filepath"
+	"sync"
+
+	"github.com/containerd/containerd"
+	"github.com/containerd/containerd/linux/runcopts"
+	"github.com/docker/docker/api/errdefs"
+	"github.com/docker/docker/libcontainerd"
+	"github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// PluginNamespace is the name used for the plugins namespace
+var PluginNamespace = "moby-plugins"
+
+// ExitHandler represents an object that is called when the exit event is received from containerd
+type ExitHandler interface {
+	HandleExitEvent(id string) error
+}
+
+// New creates a new containerd plugin executor
+func New(rootDir string, remote libcontainerd.Remote, exitHandler ExitHandler) (*Executor, error) {
+	e := &Executor{
+		rootDir:     rootDir,
+		exitHandler: exitHandler,
+	}
+	client, err := remote.NewClient(PluginNamespace, e)
+	if err != nil {
+		return nil, errors.Wrap(err, "error creating containerd exec client")
+	}
+	e.client = client
+	return e, nil
+}
+
+// Executor is the containerd client implementation of a plugin executor
+type Executor struct {
+	rootDir     string
+	client      libcontainerd.Client
+	exitHandler ExitHandler
+}
+
+// Create creates a new container
+func (e *Executor) Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error {
+	opts := runcopts.RuncOptions{
+		RuntimeRoot: filepath.Join(e.rootDir, "runtime-root"),
+	}
+	ctx := context.Background()
+	err := e.client.Create(ctx, id, &spec, &opts)
+	if err != nil {
+		return err
+	}
+
+	_, err = e.client.Start(ctx, id, "", false, attachStreamsFunc(stdout, stderr))
+	return err
+}
+
+// Restore restores a container
+func (e *Executor) Restore(id string, stdout, stderr io.WriteCloser) error {
+	alive, _, err := e.client.Restore(context.Background(), id, attachStreamsFunc(stdout, stderr))
+	if err != nil && !errdefs.IsNotFound(err) {
+		return err
+	}
+	if !alive {
+		_, _, err = e.client.DeleteTask(context.Background(), id)
+		if err != nil && !errdefs.IsNotFound(err) {
+			logrus.WithError(err).Errorf("failed to delete container plugin %s task from containerd", id)
+			return err
+		}
+
+		err = e.client.Delete(context.Background(), id)
+		if err != nil && !errdefs.IsNotFound(err) {
+			logrus.WithError(err).Errorf("failed to delete container plugin %s from containerd", id)
+			return err
+		}
+	}
+	return nil
+}
+
+// IsRunning returns if the container with the given id is running
+func (e *Executor) IsRunning(id string) (bool, error) {
+	status, err := e.client.Status(context.Background(), id)
+	return status == libcontainerd.StatusRunning, err
+}
+
+// Signal sends the specified signal to the container
+func (e *Executor) Signal(id string, signal int) error {
+	return e.client.SignalProcess(context.Background(), id, libcontainerd.InitProcessName, signal)
+}
+
+// ProcessEvent handles events from containerd
+// All events are ignored except the exit event, which is sent off to the stored handler
+func (e *Executor) ProcessEvent(id string, et libcontainerd.EventType, ei libcontainerd.EventInfo) error {
+	switch et {
+	case libcontainerd.EventExit:
+		// delete task and container
+		if _, _, err := e.client.DeleteTask(context.Background(), id); err != nil {
+			logrus.WithError(err).Errorf("failed to delete container plugin %s task from containerd", id)
+		}
+
+		if err := e.client.Delete(context.Background(), id); err != nil {
+			logrus.WithError(err).Errorf("failed to delete container plugin %s from containerd", id)
+		}
+		return e.exitHandler.HandleExitEvent(ei.ContainerID)
+	}
+	return nil
+}
+
+type cio struct {
+	containerd.IO
+
+	wg sync.WaitGroup
+}
+
+func (c *cio) Wait() {
+	c.wg.Wait()
+	c.IO.Wait()
+}
+
+func attachStreamsFunc(stdout, stderr io.WriteCloser) libcontainerd.StdioCallback {
+	return func(iop *libcontainerd.IOPipe) (containerd.IO, error) {
+		if iop.Stdin != nil {
+			iop.Stdin.Close()
+			// closing stdin shouldn't be needed here, it should never be open
+			panic("plugin stdin shouldn't have been created!")
+		}
+
+		cio := &cio{IO: iop}
+		cio.wg.Add(2)
+		go func() {
+			io.Copy(stdout, iop.Stdout)
+			stdout.Close()
+			cio.wg.Done()
+		}()
+		go func() {
+			io.Copy(stderr, iop.Stderr)
+			stderr.Close()
+			cio.wg.Done()
+		}()
+		return cio, nil
+	}
+}
diff --git a/plugin/manager.go b/plugin/manager.go
index 2281dfd..f144e82 100644
--- a/plugin/manager.go
+++ b/plugin/manager.go
@@ -17,7 +17,6 @@
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/image"
 	"github.com/docker/docker/layer"
-	"github.com/docker/docker/libcontainerd"
 	"github.com/docker/docker/pkg/authorization"
 	"github.com/docker/docker/pkg/ioutils"
 	"github.com/docker/docker/pkg/mount"
@@ -26,6 +25,7 @@
 	"github.com/docker/docker/plugin/v2"
 	"github.com/docker/docker/registry"
 	"github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
@@ -35,6 +35,14 @@
 
 var validFullID = regexp.MustCompile(`^([a-f0-9]{64})$`)
 
+// Executor is the interface that the plugin manager uses to interact with for starting/stopping plugins
+type Executor interface {
+	Create(id string, spec specs.Spec, stdout, stderr io.WriteCloser) error
+	Restore(id string, stdout, stderr io.WriteCloser) error
+	IsRunning(id string) (bool, error)
+	Signal(id string, signal int) error
+}
+
 func (pm *Manager) restorePlugin(p *v2.Plugin) error {
 	if p.IsEnabled() {
 		return pm.restore(p)
@@ -47,24 +55,27 @@
 // ManagerConfig defines configuration needed to start new manager.
 type ManagerConfig struct {
 	Store              *Store // remove
-	Executor           libcontainerd.Remote
 	RegistryService    registry.Service
 	LiveRestoreEnabled bool // TODO: remove
 	LogPluginEvent     eventLogger
 	Root               string
 	ExecRoot           string
+	CreateExecutor     ExecutorCreator
 	AuthzMiddleware    *authorization.Middleware
 }
 
+// ExecutorCreator is used in the manager config to pass in an `Executor`
+type ExecutorCreator func(*Manager) (Executor, error)
+
 // Manager controls the plugin subsystem.
 type Manager struct {
-	config           ManagerConfig
-	mu               sync.RWMutex // protects cMap
-	muGC             sync.RWMutex // protects blobstore deletions
-	cMap             map[*v2.Plugin]*controller
-	containerdClient libcontainerd.Client
-	blobStore        *basicBlobStore
-	publisher        *pubsub.Publisher
+	config    ManagerConfig
+	mu        sync.RWMutex // protects cMap
+	muGC      sync.RWMutex // protects blobstore deletions
+	cMap      map[*v2.Plugin]*controller
+	blobStore *basicBlobStore
+	publisher *pubsub.Publisher
+	executor  Executor
 }
 
 // controller represents the manager's control on a plugin.
@@ -96,14 +107,10 @@
 	manager := &Manager{
 		config: config,
 	}
-	if err := os.MkdirAll(manager.config.Root, 0700); err != nil {
-		return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.Root)
-	}
-	if err := os.MkdirAll(manager.config.ExecRoot, 0700); err != nil {
-		return nil, errors.Wrapf(err, "failed to mkdir %v", manager.config.ExecRoot)
-	}
-	if err := os.MkdirAll(manager.tmpDir(), 0700); err != nil {
-		return nil, errors.Wrapf(err, "failed to mkdir %v", manager.tmpDir())
+	for _, dirName := range []string{manager.config.Root, manager.config.ExecRoot, manager.tmpDir()} {
+		if err := os.MkdirAll(dirName, 0700); err != nil {
+			return nil, errors.Wrapf(err, "failed to mkdir %v", dirName)
+		}
 	}
 
 	if err := setupRoot(manager.config.Root); err != nil {
@@ -111,10 +118,11 @@
 	}
 
 	var err error
-	manager.containerdClient, err = config.Executor.Client(manager) // todo: move to another struct
+	manager.executor, err = config.CreateExecutor(manager)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to create containerd client")
+		return nil, err
 	}
+
 	manager.blobStore, err = newBasicBlobStore(filepath.Join(manager.config.Root, "storage/blobs"))
 	if err != nil {
 		return nil, err
@@ -133,42 +141,37 @@
 	return filepath.Join(pm.config.Root, "tmp")
 }
 
-// StateChanged updates plugin internals using libcontainerd events.
-func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error {
-	logrus.Debugf("plugin state changed %s %#v", id, e)
+// HandleExitEvent is called when the executor receives the exit event
+// In the future we may change this, but for now all we care about is the exit event.
+func (pm *Manager) HandleExitEvent(id string) error {
+	p, err := pm.config.Store.GetV2Plugin(id)
+	if err != nil {
+		return err
+	}
 
-	switch e.State {
-	case libcontainerd.StateExit:
-		p, err := pm.config.Store.GetV2Plugin(id)
-		if err != nil {
-			return err
+	os.RemoveAll(filepath.Join(pm.config.ExecRoot, id))
+
+	if p.PropagatedMount != "" {
+		if err := mount.Unmount(p.PropagatedMount); err != nil {
+			logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err)
 		}
-
-		os.RemoveAll(filepath.Join(pm.config.ExecRoot, id))
-
-		if p.PropagatedMount != "" {
-			if err := mount.Unmount(p.PropagatedMount); err != nil {
-				logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err)
-			}
-			propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount")
-			if err := mount.Unmount(propRoot); err != nil {
-				logrus.Warn("Could not unmount %s: %v", propRoot, err)
-			}
-		}
-
-		pm.mu.RLock()
-		c := pm.cMap[p]
-		if c.exitChan != nil {
-			close(c.exitChan)
-		}
-		restart := c.restart
-		pm.mu.RUnlock()
-
-		if restart {
-			pm.enable(p, c, true)
+		propRoot := filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount")
+		if err := mount.Unmount(propRoot); err != nil {
+			logrus.Warnf("Could not unmount %s: %v", propRoot, err)
 		}
 	}
 
+	pm.mu.RLock()
+	c := pm.cMap[p]
+	if c.exitChan != nil {
+		close(c.exitChan)
+	}
+	restart := c.restart
+	pm.mu.RUnlock()
+
+	if restart {
+		pm.enable(p, c, true)
+	}
 	return nil
 }
 
@@ -333,23 +336,10 @@
 	return nil
 }
 
-func attachToLog(id string) func(libcontainerd.IOPipe) error {
-	return func(iop libcontainerd.IOPipe) error {
-		iop.Stdin.Close()
-
-		logger := logrus.New()
-		logger.Hooks.Add(logHook{id})
-		// TODO: cache writer per id
-		w := logger.Writer()
-		go func() {
-			io.Copy(w, iop.Stdout)
-		}()
-		go func() {
-			// TODO: update logrus and use logger.WriterLevel
-			io.Copy(w, iop.Stderr)
-		}()
-		return nil
-	}
+func makeLoggerStreams(id string) (stdout, stderr io.WriteCloser) {
+	logger := logrus.New()
+	logger.Hooks.Add(logHook{id})
+	return logger.WriterLevel(logrus.InfoLevel), logger.WriterLevel(logrus.ErrorLevel)
 }
 
 func validatePrivileges(requiredPrivileges, privileges types.PluginPrivileges) error {
@@ -385,11 +375,11 @@
 	return reflect.DeepEqual(a.Value, b.Value)
 }
 
-func configToRootFS(c []byte) (*image.RootFS, layer.Platform, error) {
-	// TODO @jhowardmsft LCOW - Will need to revisit this. For now, calculate the platform.
-	platform := layer.Platform(runtime.GOOS)
+func configToRootFS(c []byte) (*image.RootFS, layer.OS, error) {
+	// TODO @jhowardmsft LCOW - Will need to revisit this. For now, calculate the operating system.
+	os := layer.OS(runtime.GOOS)
 	if system.LCOWSupported() {
-		platform = "linux"
+		os = "linux"
 	}
 	var pluginConfig types.PluginConfig
 	if err := json.Unmarshal(c, &pluginConfig); err != nil {
@@ -397,10 +387,10 @@
 	}
 	// validation for empty rootfs is in distribution code
 	if pluginConfig.Rootfs == nil {
-		return nil, platform, nil
+		return nil, os, nil
 	}
 
-	return rootFSFromPlugin(pluginConfig.Rootfs), platform, nil
+	return rootFSFromPlugin(pluginConfig.Rootfs), os, nil
 }
 
 func rootFSFromPlugin(pluginfs *types.PluginConfigRootfs) *image.RootFS {
diff --git a/plugin/manager_linux.go b/plugin/manager_linux.go
index 84bf606..eff21e1 100644
--- a/plugin/manager_linux.go
+++ b/plugin/manager_linux.go
@@ -11,7 +11,7 @@
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/daemon/initlayer"
-	"github.com/docker/docker/libcontainerd"
+	"github.com/docker/docker/pkg/containerfs"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/pkg/plugins"
@@ -23,7 +23,7 @@
 	"golang.org/x/sys/unix"
 )
 
-func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) error {
+func (pm *Manager) enable(p *v2.Plugin, c *controller, force bool) (err error) {
 	p.Rootfs = filepath.Join(pm.config.Root, p.PluginObj.ID, "rootfs")
 	if p.IsEnabled() && !force {
 		return errors.Wrap(enabledError(p.Name()), "plugin already enabled")
@@ -44,24 +44,26 @@
 	if p.PropagatedMount != "" {
 		propRoot = filepath.Join(filepath.Dir(p.Rootfs), "propagated-mount")
 
-		if err := os.MkdirAll(propRoot, 0755); err != nil {
+		if err = os.MkdirAll(propRoot, 0755); err != nil {
 			logrus.Errorf("failed to create PropagatedMount directory at %s: %v", propRoot, err)
 		}
 
-		if err := mount.MakeRShared(propRoot); err != nil {
+		if err = mount.MakeRShared(propRoot); err != nil {
 			return errors.Wrap(err, "error setting up propagated mount dir")
 		}
 
-		if err := mount.Mount(propRoot, p.PropagatedMount, "none", "rbind"); err != nil {
+		if err = mount.Mount(propRoot, p.PropagatedMount, "none", "rbind"); err != nil {
 			return errors.Wrap(err, "error creating mount for propagated mount")
 		}
 	}
 
-	if err := initlayer.Setup(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName), idtools.IDPair{0, 0}); err != nil {
+	rootFS := containerfs.NewLocalContainerFS(filepath.Join(pm.config.Root, p.PluginObj.ID, rootFSFileName))
+	if err := initlayer.Setup(rootFS, idtools.IDPair{0, 0}); err != nil {
 		return errors.WithStack(err)
 	}
 
-	if err := pm.containerdClient.Create(p.GetID(), "", "", *spec, attachToLog(p.GetID())); err != nil {
+	stdout, stderr := makeLoggerStreams(p.GetID())
+	if err := pm.executor.Create(p.GetID(), *spec, stdout, stderr); err != nil {
 		if p.PropagatedMount != "" {
 			if err := mount.Unmount(p.PropagatedMount); err != nil {
 				logrus.Warnf("Could not unmount %s: %v", p.PropagatedMount, err)
@@ -70,7 +72,6 @@
 				logrus.Warnf("Could not unmount %s: %v", propRoot, err)
 			}
 		}
-		return errors.WithStack(err)
 	}
 
 	return pm.pluginPostStart(p, c)
@@ -81,7 +82,7 @@
 	client, err := plugins.NewClientWithTimeout("unix://"+sockAddr, nil, time.Duration(c.timeoutInSecs)*time.Second)
 	if err != nil {
 		c.restart = false
-		shutdownPlugin(p, c, pm.containerdClient)
+		shutdownPlugin(p, c, pm.executor)
 		return errors.WithStack(err)
 	}
 
@@ -107,7 +108,7 @@
 			c.restart = false
 			// While restoring plugins, we need to explicitly set the state to disabled
 			pm.config.Store.SetState(p, false)
-			shutdownPlugin(p, c, pm.containerdClient)
+			shutdownPlugin(p, c, pm.executor)
 			return err
 		}
 
@@ -119,13 +120,14 @@
 }
 
 func (pm *Manager) restore(p *v2.Plugin) error {
-	if err := pm.containerdClient.Restore(p.GetID(), attachToLog(p.GetID())); err != nil {
+	stdout, stderr := makeLoggerStreams(p.GetID())
+	if err := pm.executor.Restore(p.GetID(), stdout, stderr); err != nil {
 		return err
 	}
 
 	if pm.config.LiveRestoreEnabled {
 		c := &controller{}
-		if pids, _ := pm.containerdClient.GetPidsForContainer(p.GetID()); len(pids) == 0 {
+		if isRunning, _ := pm.executor.IsRunning(p.GetID()); !isRunning {
 			// plugin is not running, so follow normal startup procedure
 			return pm.enable(p, c, true)
 		}
@@ -141,10 +143,10 @@
 	return nil
 }
 
-func shutdownPlugin(p *v2.Plugin, c *controller, containerdClient libcontainerd.Client) {
+func shutdownPlugin(p *v2.Plugin, c *controller, executor Executor) {
 	pluginID := p.GetID()
 
-	err := containerdClient.Signal(pluginID, int(unix.SIGTERM))
+	err := executor.Signal(pluginID, int(unix.SIGTERM))
 	if err != nil {
 		logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err)
 	} else {
@@ -153,9 +155,15 @@
 			logrus.Debug("Clean shutdown of plugin")
 		case <-time.After(time.Second * 10):
 			logrus.Debug("Force shutdown plugin")
-			if err := containerdClient.Signal(pluginID, int(unix.SIGKILL)); err != nil {
+			if err := executor.Signal(pluginID, int(unix.SIGKILL)); err != nil {
 				logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err)
 			}
+			select {
+			case <-c.exitChan:
+				logrus.Debug("SIGKILL plugin shutdown")
+			case <-time.After(time.Second * 10):
+				logrus.Debug("Force shutdown plugin FAILED")
+			}
 		}
 	}
 }
@@ -173,7 +181,7 @@
 	}
 
 	c.restart = false
-	shutdownPlugin(p, c, pm.containerdClient)
+	shutdownPlugin(p, c, pm.executor)
 	pm.config.Store.SetState(p, false)
 	return pm.save(p)
 }
@@ -190,9 +198,9 @@
 			logrus.Debug("Plugin active when liveRestore is set, skipping shutdown")
 			continue
 		}
-		if pm.containerdClient != nil && p.IsEnabled() {
+		if pm.executor != nil && p.IsEnabled() {
 			c.restart = false
-			shutdownPlugin(p, c, pm.containerdClient)
+			shutdownPlugin(p, c, pm.executor)
 		}
 	}
 	mount.Unmount(pm.config.Root)
diff --git a/plugin/store.go b/plugin/store.go
index adc0e26..b339814 100644
--- a/plugin/store.go
+++ b/plugin/store.go
@@ -111,19 +111,19 @@
 
 // Get returns an enabled plugin matching the given name and capability.
 func (ps *Store) Get(name, capability string, mode int) (plugingetter.CompatPlugin, error) {
-	var (
-		p   *v2.Plugin
-		err error
-	)
-
 	// Lookup using new model.
 	if ps != nil {
-		p, err = ps.GetV2Plugin(name)
+		p, err := ps.GetV2Plugin(name)
 		if err == nil {
-			p.AddRefCount(mode)
 			if p.IsEnabled() {
-				return p.FilterByCap(capability)
+				fp, err := p.FilterByCap(capability)
+				if err != nil {
+					return nil, err
+				}
+				p.AddRefCount(mode)
+				return fp, nil
 			}
+
 			// Plugin was found but it is disabled, so we should not fall back to legacy plugins
 			// but we should error out right away
 			return nil, errDisabled(name)
@@ -133,19 +133,18 @@
 		}
 	}
 
-	// Lookup using legacy model.
-	if allowV1PluginsFallback {
-		p, err := plugins.Get(name, capability)
-		if err != nil {
-			if errors.Cause(err) == plugins.ErrNotFound {
-				return nil, errNotFound(name)
-			}
-			return nil, errors.Wrap(systemError{err}, "legacy plugin")
-		}
-		return p, nil
+	if !allowV1PluginsFallback {
+		return nil, errNotFound(name)
 	}
 
-	return nil, err
+	p, err := plugins.Get(name, capability)
+	if err == nil {
+		return p, nil
+	}
+	if errors.Cause(err) == plugins.ErrNotFound {
+		return nil, errNotFound(name)
+	}
+	return nil, errors.Wrap(systemError{err}, "legacy plugin")
 }
 
 // GetAllManagedPluginsByCap returns a list of managed plugins matching the given capability.
diff --git a/plugin/store_test.go b/plugin/store_test.go
index d3876da..5c61cc6 100644
--- a/plugin/store_test.go
+++ b/plugin/store_test.go
@@ -4,6 +4,7 @@
 	"testing"
 
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/plugin/v2"
 )
 
@@ -31,3 +32,33 @@
 		t.Fatalf("expected no error, got %v", err)
 	}
 }
+
+func TestStoreGetPluginNotMatchCapRefs(t *testing.T) {
+	s := NewStore()
+	p := v2.Plugin{PluginObj: types.Plugin{Name: "test:latest"}}
+
+	iType := types.PluginInterfaceType{Capability: "whatever", Prefix: "docker", Version: "1.0"}
+	i := types.PluginConfigInterface{Socket: "plugins.sock", Types: []types.PluginInterfaceType{iType}}
+	p.PluginObj.Config.Interface = i
+
+	if err := s.Add(&p); err != nil {
+		t.Fatal(err)
+	}
+
+	if _, err := s.Get("test", "volumedriver", plugingetter.Acquire); err == nil {
+		t.Fatal("expected error when getting plugin that doesn't match the passed in capability")
+	}
+
+	if refs := p.GetRefCount(); refs != 0 {
+		t.Fatalf("reference count should be 0, got: %d", refs)
+	}
+
+	p.PluginObj.Enabled = true
+	if _, err := s.Get("test", "volumedriver", plugingetter.Acquire); err == nil {
+		t.Fatal("expected error when getting plugin that doesn't match the passed in capability")
+	}
+
+	if refs := p.GetRefCount(); refs != 0 {
+		t.Fatalf("reference count should be 0, got: %d", refs)
+	}
+}
diff --git a/profiles/apparmor/template.go b/profiles/apparmor/template.go
index c5ea458..741da9c 100644
--- a/profiles/apparmor/template.go
+++ b/profiles/apparmor/template.go
@@ -24,8 +24,6 @@
   deny @{PROC}/sys/[^k]** w,  # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel)
   deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w,  # deny everything except shm* in /proc/sys/kernel/
   deny @{PROC}/sysrq-trigger rwklx,
-  deny @{PROC}/mem rwklx,
-  deny @{PROC}/kmem rwklx,
   deny @{PROC}/kcore rwklx,
 
   deny mount,
diff --git a/project/RELEASE-PROCESS.md b/project/RELEASE-PROCESS.md
index d764e9d..8270a6e 100644
--- a/project/RELEASE-PROCESS.md
+++ b/project/RELEASE-PROCESS.md
@@ -45,7 +45,7 @@
 - The earlier a PR is opened, the more time the maintainers have to review. For
 example, if a PR is opened the day before the freeze date, it’s very unlikely
 that it will be merged for the release.
-- Constant communication with the maintainers (mailing-list, IRC, Github issues,
+- Constant communication with the maintainers (mailing-list, IRC, GitHub issues,
 etc.) allows to get early feedback on the design before getting into the
 implementation, which usually reduces the time needed to discuss a changeset.
 - If the code is commented, fully tested and by extension follows every single
diff --git a/project/REVIEWING.md b/project/REVIEWING.md
index 51ef4c5..cac3f5d 100644
--- a/project/REVIEWING.md
+++ b/project/REVIEWING.md
@@ -68,7 +68,7 @@
 
  - Has DCO
  - Contains sufficient justification (e.g., usecases) for the proposed change
- - References the Github issue it fixes (if any) in the commit or the first Github comment
+ - References the GitHub issue it fixes (if any) in the commit or the first GitHub comment
 
 Possible transitions from this state:
 
diff --git a/reference/store_test.go b/reference/store_test.go
index 8f0ff63..72a19d6 100644
--- a/reference/store_test.go
+++ b/reference/store_test.go
@@ -9,7 +9,9 @@
 	"testing"
 
 	"github.com/docker/distribution/reference"
-	"github.com/opencontainers/go-digest"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 var (
@@ -62,10 +64,10 @@
 
 func TestSave(t *testing.T) {
 	jsonFile, err := ioutil.TempFile("", "tag-store-test")
-	if err != nil {
-		t.Fatalf("error creating temp file: %v", err)
-	}
+	require.NoError(t, err)
+
 	_, err = jsonFile.Write([]byte(`{}`))
+	require.NoError(t, err)
 	jsonFile.Close()
 	defer os.RemoveAll(jsonFile.Name())
 
@@ -304,19 +306,19 @@
 	}
 
 	// Delete a few references
-	if deleted, err := store.Delete(ref1); err != nil || deleted != true {
+	if deleted, err := store.Delete(ref1); err != nil || !deleted {
 		t.Fatal("Delete failed")
 	}
 	if _, err := store.Get(ref1); err != ErrDoesNotExist {
 		t.Fatal("Expected ErrDoesNotExist from Get")
 	}
-	if deleted, err := store.Delete(ref5); err != nil || deleted != true {
+	if deleted, err := store.Delete(ref5); err != nil || !deleted {
 		t.Fatal("Delete failed")
 	}
 	if _, err := store.Get(ref5); err != ErrDoesNotExist {
 		t.Fatal("Expected ErrDoesNotExist from Get")
 	}
-	if deleted, err := store.Delete(nameOnly); err != nil || deleted != true {
+	if deleted, err := store.Delete(nameOnly); err != nil || !deleted {
 		t.Fatal("Delete failed")
 	}
 	if _, err := store.Get(nameOnly); err != ErrDoesNotExist {
@@ -326,32 +328,23 @@
 
 func TestInvalidTags(t *testing.T) {
 	tmpDir, err := ioutil.TempDir("", "tag-store-test")
+	require.NoError(t, err)
 	defer os.RemoveAll(tmpDir)
 
 	store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json"))
-	if err != nil {
-		t.Fatalf("error creating tag store: %v", err)
-	}
+	require.NoError(t, err)
 	id := digest.Digest("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6")
 
 	// sha256 as repo name
 	ref, err := reference.ParseNormalizedNamed("sha256:abc")
-	if err != nil {
-		t.Fatal(err)
-	}
+	require.NoError(t, err)
 	err = store.AddTag(ref, id, true)
-	if err == nil {
-		t.Fatalf("expected setting tag %q to fail", ref)
-	}
+	assert.Error(t, err)
 
 	// setting digest as a tag
 	ref, err = reference.ParseNormalizedNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6")
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = store.AddTag(ref, id, true)
-	if err == nil {
-		t.Fatalf("expected setting tag %q to fail", ref)
-	}
+	require.NoError(t, err)
 
+	err = store.AddTag(ref, id, true)
+	assert.Error(t, err)
 }
diff --git a/registry/auth.go b/registry/auth.go
index b0a03d0..11937d8 100644
--- a/registry/auth.go
+++ b/registry/auth.go
@@ -125,7 +125,7 @@
 func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) {
 	logrus.Debugf("attempting v2 login to registry endpoint %s", strings.TrimRight(endpoint.URL.String(), "/")+"/v2/")
 
-	modifiers := DockerHeaders(userAgent, nil)
+	modifiers := Headers(userAgent, nil)
 	authTransport := transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...)
 
 	credentialAuthConfig := *authConfig
diff --git a/registry/auth_test.go b/registry/auth_test.go
index 9ab71aa..f5f213b 100644
--- a/registry/auth_test.go
+++ b/registry/auth_test.go
@@ -1,7 +1,3 @@
-// +build !solaris
-
-// TODO: Support Solaris
-
 package registry
 
 import (
diff --git a/registry/config.go b/registry/config.go
index 70efb43..5246faa 100644
--- a/registry/config.go
+++ b/registry/config.go
@@ -60,7 +60,7 @@
 	// not have the correct form
 	ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")")
 
-	emptyServiceConfig = newServiceConfig(ServiceOptions{})
+	emptyServiceConfig, _ = newServiceConfig(ServiceOptions{})
 )
 
 var (
@@ -71,22 +71,27 @@
 var lookupIP = net.LookupIP
 
 // newServiceConfig returns a new instance of ServiceConfig
-func newServiceConfig(options ServiceOptions) *serviceConfig {
+func newServiceConfig(options ServiceOptions) (*serviceConfig, error) {
 	config := &serviceConfig{
 		ServiceConfig: registrytypes.ServiceConfig{
 			InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0),
-			IndexConfigs:          make(map[string]*registrytypes.IndexInfo, 0),
+			IndexConfigs:          make(map[string]*registrytypes.IndexInfo),
 			// Hack: Bypass setting the mirrors to IndexConfigs since they are going away
 			// and Mirrors are only for the official registry anyways.
 		},
 		V2Only: options.V2Only,
 	}
+	if err := config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil {
+		return nil, err
+	}
+	if err := config.LoadMirrors(options.Mirrors); err != nil {
+		return nil, err
+	}
+	if err := config.LoadInsecureRegistries(options.InsecureRegistries); err != nil {
+		return nil, err
+	}
 
-	config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts)
-	config.LoadMirrors(options.Mirrors)
-	config.LoadInsecureRegistries(options.InsecureRegistries)
-
-	return config
+	return config, nil
 }
 
 // LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config.
@@ -171,7 +176,7 @@
 	originalIndexInfos := config.ServiceConfig.IndexConfigs
 
 	config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0)
-	config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo, 0)
+	config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo)
 
 skip:
 	for _, r := range registries {
diff --git a/registry/config_test.go b/registry/config_test.go
index 8cb7e5a..40aa03d 100644
--- a/registry/config_test.go
+++ b/registry/config_test.go
@@ -5,6 +5,8 @@
 	"sort"
 	"strings"
 	"testing"
+
+	"github.com/stretchr/testify/assert"
 )
 
 func TestLoadAllowNondistributableArtifacts(t *testing.T) {
@@ -90,7 +92,7 @@
 		},
 	}
 	for _, testCase := range testCases {
-		config := newServiceConfig(ServiceOptions{})
+		config := emptyServiceConfig
 		err := config.LoadAllowNondistributableArtifacts(testCase.registries)
 		if testCase.err == "" {
 			if err != nil {
@@ -233,7 +235,7 @@
 		},
 	}
 	for _, testCase := range testCases {
-		config := newServiceConfig(ServiceOptions{})
+		config := emptyServiceConfig
 		err := config.LoadInsecureRegistries(testCase.registries)
 		if testCase.err == "" {
 			if err != nil {
@@ -258,3 +260,121 @@
 		}
 	}
 }
+
+func TestNewServiceConfig(t *testing.T) {
+	testCases := []struct {
+		opts   ServiceOptions
+		errStr string
+	}{
+		{
+			ServiceOptions{},
+			"",
+		},
+		{
+			ServiceOptions{
+				Mirrors: []string{"example.com:5000"},
+			},
+			`invalid mirror: unsupported scheme "example.com" in "example.com:5000"`,
+		},
+		{
+			ServiceOptions{
+				Mirrors: []string{"http://example.com:5000"},
+			},
+			"",
+		},
+		{
+			ServiceOptions{
+				InsecureRegistries: []string{"[fe80::]/64"},
+			},
+			`insecure registry [fe80::]/64 is not valid: invalid host "[fe80::]/64"`,
+		},
+		{
+			ServiceOptions{
+				InsecureRegistries: []string{"102.10.8.1/24"},
+			},
+			"",
+		},
+		{
+			ServiceOptions{
+				AllowNondistributableArtifacts: []string{"[fe80::]/64"},
+			},
+			`allow-nondistributable-artifacts registry [fe80::]/64 is not valid: invalid host "[fe80::]/64"`,
+		},
+		{
+			ServiceOptions{
+				AllowNondistributableArtifacts: []string{"102.10.8.1/24"},
+			},
+			"",
+		},
+	}
+
+	for _, testCase := range testCases {
+		_, err := newServiceConfig(testCase.opts)
+		if testCase.errStr != "" {
+			assert.EqualError(t, err, testCase.errStr)
+		} else {
+			assert.Nil(t, err)
+		}
+	}
+}
+
+func TestValidateIndexName(t *testing.T) {
+	valid := []struct {
+		index  string
+		expect string
+	}{
+		{
+			index:  "index.docker.io",
+			expect: "docker.io",
+		},
+		{
+			index:  "example.com",
+			expect: "example.com",
+		},
+		{
+			index:  "127.0.0.1:8080",
+			expect: "127.0.0.1:8080",
+		},
+		{
+			index:  "mytest-1.com",
+			expect: "mytest-1.com",
+		},
+		{
+			index:  "mirror-1.com/v1/?q=foo",
+			expect: "mirror-1.com/v1/?q=foo",
+		},
+	}
+
+	for _, testCase := range valid {
+		result, err := ValidateIndexName(testCase.index)
+		if assert.NoError(t, err) {
+			assert.Equal(t, testCase.expect, result)
+		}
+
+	}
+
+}
+
+func TestValidateIndexNameWithError(t *testing.T) {
+	invalid := []struct {
+		index string
+		err   string
+	}{
+		{
+			index: "docker.io-",
+			err:   "invalid index name (docker.io-). Cannot begin or end with a hyphen",
+		},
+		{
+			index: "-example.com",
+			err:   "invalid index name (-example.com). Cannot begin or end with a hyphen",
+		},
+		{
+			index: "mirror-1.com/v1/?q=foo-",
+			err:   "invalid index name (mirror-1.com/v1/?q=foo-). Cannot begin or end with a hyphen",
+		},
+	}
+	for _, testCase := range invalid {
+		_, err := ValidateIndexName(testCase.index)
+		assert.EqualError(t, err, testCase.err)
+	}
+}
diff --git a/registry/endpoint_v1.go b/registry/endpoint_v1.go
index d6a51bf..9b4838a 100644
--- a/registry/endpoint_v1.go
+++ b/registry/endpoint_v1.go
@@ -77,7 +77,7 @@
 
 	// TODO(tiborvass): make sure a ConnectTimeout transport is used
 	tr := NewTransport(tlsConfig)
-	endpoint.client = HTTPClient(transport.NewTransport(tr, DockerHeaders(userAgent, metaHeaders)...))
+	endpoint.client = HTTPClient(transport.NewTransport(tr, Headers(userAgent, metaHeaders)...))
 	return endpoint
 }
 
diff --git a/registry/registry.go b/registry/registry.go
index 5fef0db..fc49a6e 100644
--- a/registry/registry.go
+++ b/registry/registry.go
@@ -102,8 +102,8 @@
 	return nil
 }
 
-// DockerHeaders returns request modifiers with a User-Agent and metaHeaders
-func DockerHeaders(userAgent string, metaHeaders http.Header) []transport.RequestModifier {
+// Headers returns request modifiers with a User-Agent and metaHeaders
+func Headers(userAgent string, metaHeaders http.Header) []transport.RequestModifier {
 	modifiers := []transport.RequestModifier{}
 	if userAgent != "" {
 		modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{
diff --git a/registry/registry_mock_test.go b/registry/registry_mock_test.go
index 204a983..f814273 100644
--- a/registry/registry_mock_test.go
+++ b/registry/registry_mock_test.go
@@ -1,5 +1,3 @@
-// +build !solaris
-
 package registry
 
 import (
@@ -175,7 +173,7 @@
 	return index
 }
 
-func makeServiceConfig(mirrors []string, insecureRegistries []string) *serviceConfig {
+func makeServiceConfig(mirrors []string, insecureRegistries []string) (*serviceConfig, error) {
 	options := ServiceOptions{
 		Mirrors:            mirrors,
 		InsecureRegistries: insecureRegistries,
diff --git a/registry/registry_test.go b/registry/registry_test.go
index d89c46c..56e362f 100644
--- a/registry/registry_test.go
+++ b/registry/registry_test.go
@@ -1,5 +1,3 @@
-// +build !solaris
-
 package registry
 
 import (
@@ -14,6 +12,7 @@
 	"github.com/docker/distribution/registry/client/transport"
 	"github.com/docker/docker/api/types"
 	registrytypes "github.com/docker/docker/api/types/registry"
+	"github.com/stretchr/testify/assert"
 )
 
 var (
@@ -33,7 +32,7 @@
 	}
 	userAgent := "docker test client"
 	var tr http.RoundTripper = debugTransport{NewTransport(nil), t.Log}
-	tr = transport.NewTransport(AuthTransport(tr, authConfig, false), DockerHeaders(userAgent, nil)...)
+	tr = transport.NewTransport(AuthTransport(tr, authConfig, false), Headers(userAgent, nil)...)
 	client := HTTPClient(tr)
 	r, err := NewSession(client, authConfig, endpoint)
 	if err != nil {
@@ -539,7 +538,7 @@
 		}
 	}
 
-	config := newServiceConfig(ServiceOptions{})
+	config := emptyServiceConfig
 	noMirrors := []string{}
 	expectedIndexInfos := map[string]*registrytypes.IndexInfo{
 		IndexName: {
@@ -570,7 +569,11 @@
 	testIndexInfo(config, expectedIndexInfos)
 
 	publicMirrors := []string{"http://mirror1.local", "http://mirror2.local"}
-	config = makeServiceConfig(publicMirrors, []string{"example.com"})
+	var err error
+	config, err = makeServiceConfig(publicMirrors, []string{"example.com"})
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	expectedIndexInfos = map[string]*registrytypes.IndexInfo{
 		IndexName: {
@@ -618,7 +621,10 @@
 	}
 	testIndexInfo(config, expectedIndexInfos)
 
-	config = makeServiceConfig(nil, []string{"42.42.0.0/16"})
+	config, err = makeServiceConfig(nil, []string{"42.42.0.0/16"})
+	if err != nil {
+		t.Fatal(err)
+	}
 	expectedIndexInfos = map[string]*registrytypes.IndexInfo{
 		"example.com": {
 			Name:     "example.com",
@@ -663,7 +669,11 @@
 		}
 		return false
 	}
-	s := DefaultService{config: makeServiceConfig([]string{"https://my.mirror"}, nil)}
+	cfg, err := makeServiceConfig([]string{"https://my.mirror"}, nil)
+	if err != nil {
+		t.Fatal(err)
+	}
+	s := DefaultService{config: cfg}
 
 	imageName, err := reference.WithName(IndexName + "/test/image")
 	if err != nil {
@@ -747,16 +757,12 @@
 func TestTrustedLocation(t *testing.T) {
 	for _, url := range []string{"http://example.com", "https://example.com:7777", "http://docker.io", "http://test.docker.com", "https://fakedocker.com"} {
 		req, _ := http.NewRequest("GET", url, nil)
-		if trustedLocation(req) == true {
-			t.Fatalf("'%s' shouldn't be detected as a trusted location", url)
-		}
+		assert.False(t, trustedLocation(req))
 	}
 
 	for _, url := range []string{"https://docker.io", "https://test.docker.com:80"} {
 		req, _ := http.NewRequest("GET", url, nil)
-		if trustedLocation(req) == false {
-			t.Fatalf("'%s' should be detected as a trusted location", url)
-		}
+		assert.True(t, trustedLocation(req))
 	}
 }
 
@@ -844,9 +850,12 @@
 		{"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, true},
 	}
 	for _, tt := range tests {
-		config := newServiceConfig(ServiceOptions{
+		config, err := newServiceConfig(ServiceOptions{
 			AllowNondistributableArtifacts: tt.registries,
 		})
+		if err != nil {
+			t.Error(err)
+		}
 		if v := allowNondistributableArtifacts(config, tt.addr); v != tt.expected {
 			t.Errorf("allowNondistributableArtifacts failed for %q %v, expected %v got %v", tt.addr, tt.registries, tt.expected, v)
 		}
@@ -886,7 +895,10 @@
 		{"invalid.domain.com:5000", []string{"invalid.domain.com:5000"}, false},
 	}
 	for _, tt := range tests {
-		config := makeServiceConfig(nil, tt.insecureRegistries)
+		config, err := makeServiceConfig(nil, tt.insecureRegistries)
+		if err != nil {
+			t.Error(err)
+		}
 		if sec := isSecureIndex(config, tt.addr); sec != tt.expected {
 			t.Errorf("isSecureIndex failed for %q %v, expected %v got %v", tt.addr, tt.insecureRegistries, tt.expected, sec)
 		}
diff --git a/registry/service.go b/registry/service.go
index d36b11f..f3f1b4a 100644
--- a/registry/service.go
+++ b/registry/service.go
@@ -45,10 +45,10 @@
 
 // NewService returns a new instance of DefaultService ready to be
 // installed into an engine.
-func NewService(options ServiceOptions) *DefaultService {
-	return &DefaultService{
-		config: newServiceConfig(options),
-	}
+func NewService(options ServiceOptions) (*DefaultService, error) {
+	config, err := newServiceConfig(options)
+
+	return &DefaultService{config: config}, err
 }
 
 // ServiceConfig returns the public registry service configuration.
@@ -199,7 +199,7 @@
 			},
 		}
 
-		modifiers := DockerHeaders(userAgent, nil)
+		modifiers := Headers(userAgent, nil)
 		v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes)
 		if err != nil {
 			if fErr, ok := err.(fallbackError); ok {
diff --git a/registry/service_v1_test.go b/registry/service_v1_test.go
index bd15dff..6ea846e 100644
--- a/registry/service_v1_test.go
+++ b/registry/service_v1_test.go
@@ -3,7 +3,10 @@
 import "testing"
 
 func TestLookupV1Endpoints(t *testing.T) {
-	s := NewService(ServiceOptions{})
+	s, err := NewService(ServiceOptions{})
+	if err != nil {
+		t.Fatal(err)
+	}
 
 	cases := []struct {
 		hostname    string
diff --git a/runconfig/config.go b/runconfig/config.go
index 3d236de..56eb946 100644
--- a/runconfig/config.go
+++ b/runconfig/config.go
@@ -7,8 +7,6 @@
 	"github.com/docker/docker/api/types/container"
 	networktypes "github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/pkg/sysinfo"
-	"github.com/docker/docker/volume"
-	"github.com/pkg/errors"
 )
 
 // ContainerDecoder implements httputils.ContainerDecoder
@@ -46,11 +44,6 @@
 		if w.Config.Volumes == nil {
 			w.Config.Volumes = make(map[string]struct{})
 		}
-
-		// Now validate all the volumes and binds
-		if err := validateMountSettings(w.Config, hc); err != nil {
-			return nil, nil, nil, err
-		}
 	}
 
 	// Certain parameters need daemon-side validation that cannot be done
@@ -86,23 +79,3 @@
 
 	return w.Config, hc, w.NetworkingConfig, nil
 }
-
-// validateMountSettings validates each of the volumes and bind settings
-// passed by the caller to ensure they are valid.
-func validateMountSettings(c *container.Config, hc *container.HostConfig) error {
-	// it is ok to have len(hc.Mounts) > 0 && (len(hc.Binds) > 0 || len (c.Volumes) > 0 || len (hc.Tmpfs) > 0 )
-
-	// Ensure all volumes and binds are valid.
-	for spec := range c.Volumes {
-		if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil {
-			return errors.Wrapf(err, "invalid volume spec %q", spec)
-		}
-	}
-	for _, spec := range hc.Binds {
-		if _, err := volume.ParseMountRaw(spec, hc.VolumeDriver); err != nil {
-			return errors.Wrapf(err, "invalid bind mount spec %q", spec)
-		}
-	}
-
-	return nil
-}
diff --git a/runconfig/config_test.go b/runconfig/config_test.go
index ebd74ea..a3630a0 100644
--- a/runconfig/config_test.go
+++ b/runconfig/config_test.go
@@ -9,12 +9,9 @@
 	"strings"
 	"testing"
 
-	"os"
-
 	"github.com/docker/docker/api/types/container"
 	networktypes "github.com/docker/docker/api/types/network"
 	"github.com/docker/docker/api/types/strslice"
-	"github.com/gotestyourself/gotestyourself/skip"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -31,11 +28,6 @@
 		image    string
 	)
 
-	//TODO: Should run for Solaris
-	if runtime.GOOS == "solaris" {
-		t.Skip()
-	}
-
 	if runtime.GOOS != "windows" {
 		image = "ubuntu"
 		fixtures = []f{
@@ -143,103 +135,6 @@
 	return decodeContainerConfig(bytes.NewReader(b))
 }
 
-func TestDecodeContainerConfigWithVolumes(t *testing.T) {
-	var testcases = []decodeConfigTestcase{
-		{
-			doc:         "no paths volume",
-			wrapper:     containerWrapperWithVolume(":"),
-			expectedErr: `invalid volume specification: ':'`,
-		},
-		{
-			doc:         "no paths bind",
-			wrapper:     containerWrapperWithBind(":"),
-			expectedErr: `invalid volume specification: ':'`,
-		},
-		{
-			doc:         "no paths or mode volume",
-			wrapper:     containerWrapperWithVolume("::"),
-			expectedErr: `invalid volume specification: '::'`,
-		},
-		{
-			doc:         "no paths or mode bind",
-			wrapper:     containerWrapperWithBind("::"),
-			expectedErr: `invalid volume specification: '::'`,
-		},
-	}
-	for _, testcase := range testcases {
-		t.Run(testcase.doc, runDecodeContainerConfigTestCase(testcase))
-	}
-}
-
-func TestDecodeContainerConfigWithVolumesUnix(t *testing.T) {
-	skip.IfCondition(t, runtime.GOOS == "windows")
-
-	baseErr := `invalid mount config for type "volume": invalid specification: `
-	var testcases = []decodeConfigTestcase{
-		{
-			doc:         "root to root volume",
-			wrapper:     containerWrapperWithVolume("/:/"),
-			expectedErr: `invalid volume specification: '/:/'`,
-		},
-		{
-			doc:         "root to root bind",
-			wrapper:     containerWrapperWithBind("/:/"),
-			expectedErr: `invalid volume specification: '/:/'`,
-		},
-		{
-			doc:         "no destination path volume",
-			wrapper:     containerWrapperWithVolume(`/tmp:`),
-			expectedErr: ` invalid volume specification: '/tmp:'`,
-		},
-		{
-			doc:         "no destination path bind",
-			wrapper:     containerWrapperWithBind(`/tmp:`),
-			expectedErr: ` invalid volume specification: '/tmp:'`,
-		},
-		{
-			doc:         "no destination path or mode volume",
-			wrapper:     containerWrapperWithVolume(`/tmp::`),
-			expectedErr: `invalid mount config for type "bind": field Target must not be empty`,
-		},
-		{
-			doc:         "no destination path or mode bind",
-			wrapper:     containerWrapperWithBind(`/tmp::`),
-			expectedErr: `invalid mount config for type "bind": field Target must not be empty`,
-		},
-		{
-			doc:         "too many sections volume",
-			wrapper:     containerWrapperWithVolume(`/tmp:/tmp:/tmp:/tmp`),
-			expectedErr: `invalid volume specification: '/tmp:/tmp:/tmp:/tmp'`,
-		},
-		{
-			doc:         "too many sections bind",
-			wrapper:     containerWrapperWithBind(`/tmp:/tmp:/tmp:/tmp`),
-			expectedErr: `invalid volume specification: '/tmp:/tmp:/tmp:/tmp'`,
-		},
-		{
-			doc:         "just root volume",
-			wrapper:     containerWrapperWithVolume("/"),
-			expectedErr: baseErr + `destination can't be '/'`,
-		},
-		{
-			doc:         "just root bind",
-			wrapper:     containerWrapperWithBind("/"),
-			expectedErr: baseErr + `destination can't be '/'`,
-		},
-		{
-			doc:     "bind mount passed as a volume",
-			wrapper: containerWrapperWithVolume(`/foo:/bar`),
-			expectedConfig: &container.Config{
-				Volumes: map[string]struct{}{`/foo:/bar`: {}},
-			},
-			expectedHostConfig: &container.HostConfig{NetworkMode: "default"},
-		},
-	}
-	for _, testcase := range testcases {
-		t.Run(testcase.doc, runDecodeContainerConfigTestCase(testcase))
-	}
-}
-
 type decodeConfigTestcase struct {
 	doc                string
 	wrapper            ContainerConfigWrapper
@@ -266,89 +161,6 @@
 	}
 }
 
-func TestDecodeContainerConfigWithVolumesWindows(t *testing.T) {
-	skip.IfCondition(t, runtime.GOOS != "windows")
-
-	tmpDir := os.Getenv("TEMP")
-	systemDrive := os.Getenv("SystemDrive")
-	var testcases = []decodeConfigTestcase{
-		{
-			doc:         "root to root volume",
-			wrapper:     containerWrapperWithVolume(systemDrive + `\:c:\`),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "root to root bind",
-			wrapper:     containerWrapperWithBind(systemDrive + `\:c:\`),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "no destination path volume",
-			wrapper:     containerWrapperWithVolume(tmpDir + `\:`),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "no destination path bind",
-			wrapper:     containerWrapperWithBind(tmpDir + `\:`),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "no destination path or mode volume",
-			wrapper:     containerWrapperWithVolume(tmpDir + `\::`),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "no destination path or mode bind",
-			wrapper:     containerWrapperWithBind(tmpDir + `\::`),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "too many sections volume",
-			wrapper:     containerWrapperWithVolume(tmpDir + ":" + tmpDir + ":" + tmpDir + ":" + tmpDir),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "too many sections bind",
-			wrapper:     containerWrapperWithBind(tmpDir + ":" + tmpDir + ":" + tmpDir + ":" + tmpDir),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "no drive letter volume",
-			wrapper:     containerWrapperWithVolume(`\tmp`),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "no drive letter bind",
-			wrapper:     containerWrapperWithBind(`\tmp`),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "root to c-drive volume",
-			wrapper:     containerWrapperWithVolume(systemDrive + `\:c:`),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "root to c-drive bind",
-			wrapper:     containerWrapperWithBind(systemDrive + `\:c:`),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "container path without driver letter volume",
-			wrapper:     containerWrapperWithVolume(`c:\windows:\somewhere`),
-			expectedErr: `invalid volume specification: `,
-		},
-		{
-			doc:         "container path without driver letter bind",
-			wrapper:     containerWrapperWithBind(`c:\windows:\somewhere`),
-			expectedErr: `invalid volume specification: `,
-		},
-	}
-
-	for _, testcase := range testcases {
-		t.Run(testcase.doc, runDecodeContainerConfigTestCase(testcase))
-	}
-}
-
 func marshal(t *testing.T, w ContainerConfigWrapper, doc string) []byte {
 	b, err := json.Marshal(w)
 	require.NoError(t, err, "%s: failed to encode config wrapper", doc)
diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go
index 3a90c84..cfc5383 100644
--- a/runconfig/hostconfig.go
+++ b/runconfig/hostconfig.go
@@ -68,7 +68,7 @@
 		return ErrConflictContainerNetworkAndMac
 	}
 
-	if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) {
+	if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts) {
 		return ErrConflictNetworkPublishPorts
 	}
 
diff --git a/runconfig/hostconfig_solaris.go b/runconfig/hostconfig_solaris.go
deleted file mode 100644
index 5b6e13d..0000000
--- a/runconfig/hostconfig_solaris.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package runconfig
-
-import (
-	"github.com/docker/docker/api/types/container"
-	"github.com/docker/docker/pkg/sysinfo"
-)
-
-// DefaultDaemonNetworkMode returns the default network stack the daemon should
-// use.
-func DefaultDaemonNetworkMode() container.NetworkMode {
-	return container.NetworkMode("bridge")
-}
-
-// IsPreDefinedNetwork indicates if a network is predefined by the daemon
-func IsPreDefinedNetwork(network string) bool {
-	return false
-}
-
-// validateNetMode ensures that the various combinations of requested
-// network settings are valid.
-func validateNetMode(c *container.Config, hc *container.HostConfig) error {
-	// We may not be passed a host config, such as in the case of docker commit
-	return nil
-}
-
-// validateIsolation performs platform specific validation of the
-// isolation level in the hostconfig structure.
-// This setting is currently discarded for Solaris so this is a no-op.
-func validateIsolation(hc *container.HostConfig) error {
-	return nil
-}
-
-// validateQoS performs platform specific validation of the QoS settings
-func validateQoS(hc *container.HostConfig) error {
-	return nil
-}
-
-// validateResources performs platform specific validation of the resource settings
-func validateResources(hc *container.HostConfig, si *sysinfo.SysInfo) error {
-	return nil
-}
-
-// validatePrivileged performs platform specific validation of the Privileged setting
-func validatePrivileged(hc *container.HostConfig) error {
-	return nil
-}
diff --git a/runconfig/hostconfig_test.go b/runconfig/hostconfig_test.go
index b461c16..76c3fa2 100644
--- a/runconfig/hostconfig_test.go
+++ b/runconfig/hostconfig_test.go
@@ -195,9 +195,7 @@
 			t.Fatal(fmt.Errorf("Error parsing %s: %v", f, err))
 		}
 
-		if c.Privileged != false {
-			t.Fatalf("Expected privileged false, found %v\n", c.Privileged)
-		}
+		assert.False(t, c.Privileged)
 
 		if l := len(c.Binds); l != 1 {
 			t.Fatalf("Expected 1 bind, found %d\n", l)
diff --git a/runconfig/hostconfig_unix.go b/runconfig/hostconfig_unix.go
index 55df5da..3527d29 100644
--- a/runconfig/hostconfig_unix.go
+++ b/runconfig/hostconfig_unix.go
@@ -1,4 +1,4 @@
-// +build !windows,!solaris
+// +build !windows
 
 package runconfig
 
diff --git a/vendor.conf b/vendor.conf
index 535adad..dc94fb4 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -1,21 +1,20 @@
 # the following lines are in sorted order, FYI
-github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
-github.com/Microsoft/hcsshim v0.6.3
+github.com/Azure/go-ansiterm d6e3b3328b783f23731bc4d058875b0371ff8109
+github.com/Microsoft/hcsshim v0.6.5
 github.com/Microsoft/go-winio v0.4.5
-github.com/moby/buildkit da2b9dc7dab99e824b2b1067ad7d0523e32dd2d9 https://github.com/dmcgowan/buildkit.git
 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76
 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
 github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git
 github.com/gorilla/context v1.1
 github.com/gorilla/mux v1.1
-github.com/Microsoft/opengcs v0.3.2
+github.com/Microsoft/opengcs v0.3.4
 github.com/kr/pty 5cf931ef8f
 github.com/mattn/go-shellwords v1.0.3
-github.com/sirupsen/logrus v1.0.1
+github.com/sirupsen/logrus v1.0.3
 github.com/tchap/go-patricia v2.2.6
 github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
 golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
-golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5
+golang.org/x/sys 95c6576299259db960f6c5b9b69ea52422860fce
 github.com/docker/go-units 9e638d38cf6977a37a8ea0078f3ee75a7cdb2dd1
 github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
 golang.org/x/text f72d8390a633d5dfb0cc84043294db9f6c935756
@@ -27,8 +26,11 @@
 github.com/imdario/mergo 0.2.1
 golang.org/x/sync de49d9dcd27d4f764488181bea099dfe6179bcf0
 
+github.com/moby/buildkit aaff9d591ef128560018433fe61beb802e149de8
+github.com/tonistiigi/fsutil dea3a0da73aee887fc02142d995be764106ac5e2
+
 #get libnetwork packages
-github.com/docker/libnetwork 5b28c0ec98236c489e39ae6a9e1aeb802e071681
+github.com/docker/libnetwork 72fd7e5495eba86e28012e39b5ed63ef9ca9a97b
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
@@ -53,7 +55,7 @@
 
 # get graph and distribution packages
 github.com/docker/distribution edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c
-github.com/vbatts/tar-split v0.10.1
+github.com/vbatts/tar-split v0.10.2
 github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb
 
 # get go-zfs packages
@@ -63,20 +65,19 @@
 google.golang.org/grpc v1.3.0
 
 # When updating, also update RUNC_COMMIT in hack/dockerfile/binaries-commits accordingly
-github.com/opencontainers/runc 3f2f8b84a77f73d38244dd690525642a72156c64
-github.com/opencontainers/image-spec 372ad780f63454fbbbbcc7cf80e5b90245c13e13
+github.com/opencontainers/runc 0351df1c5a66838d0c392b4ac4cf9450de844e2d
 github.com/opencontainers/runtime-spec v1.0.0
-
+github.com/opencontainers/image-spec v1.0.0
 github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0
 
 # libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json)
-github.com/coreos/go-systemd v4
+github.com/coreos/go-systemd v15
 github.com/godbus/dbus v4.0.0
 github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
 github.com/golang/protobuf 7a211bcf3bce0e3f1d74f9894916e6f116ae83b4
 
 # gelf logging driver deps
-github.com/Graylog2/go-gelf 7029da823dad4ef3a876df61065156acb703b2ea
+github.com/Graylog2/go-gelf v2
 
 github.com/fluent/fluent-logger-golang v1.2.1
 # fluent-logger-golang deps
@@ -102,17 +103,21 @@
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 
 # containerd
-github.com/containerd/containerd 06b9cb35161009dcb7123345749fef02f7cea8e0
-github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4
-github.com/stevvooe/continuity cd7a8e21e2b6f84799f5dd4b65faf49c8d3ee02d
-github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb
+github.com/containerd/containerd 992280e8e265f491f7a624ab82f3e238be086e49
+github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
+github.com/containerd/continuity 35d55c5e8dd23b32037d56cf97174aff3efdfa83
+github.com/containerd/cgroups f7dd103d3e4e696aa67152f6b4ddd1779a3455a9
+github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e
+github.com/containerd/go-runc ed1cbe1fc31f5fb2359d3a54b6330d1a097858b7
+github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
+github.com/dmcgowan/go-tar 2e2c51242e8993c50445dab7c03c8e7febddd0cf
 
 # cluster
-github.com/docker/swarmkit ddb4539f883b18ea40af44ee6de63ac2adc8dc1e
+github.com/docker/swarmkit de950a7ed842c7b7e47e9451cde9bf8f96031894
 github.com/gogo/protobuf v0.4
 github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a
 github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e
-golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2
+golang.org/x/crypto 558b6879de74bc843225cde5686419267ff707ca
 golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb
 github.com/hashicorp/go-memdb cb9a474f84cc5e41b273b20c6927680b2a8776ad
 github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990
@@ -137,7 +142,7 @@
 # metrics
 github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
 
-github.com/opencontainers/selinux v1.0.0-rc1
+github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd
 
 # archive/tar
 # mkdir -p ./vendor/archive
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
index 1bd6057..bcbe00d 100644
--- a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
+++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
@@ -5,7 +5,7 @@
 }
 
 func (csiState csiEntryState) Handle(b byte) (s state, e error) {
-	logger.Infof("CsiEntry::Handle %#x", b)
+	csiState.parser.logf("CsiEntry::Handle %#x", b)
 
 	nextState, err := csiState.baseState.Handle(b)
 	if nextState != nil || err != nil {
@@ -25,7 +25,7 @@
 }
 
 func (csiState csiEntryState) Transition(s state) error {
-	logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
+	csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
 	csiState.baseState.Transition(s)
 
 	switch s {
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
index 4be35c5..7ed5e01 100644
--- a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
+++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
@@ -5,7 +5,7 @@
 }
 
 func (csiState csiParamState) Handle(b byte) (s state, e error) {
-	logger.Infof("CsiParam::Handle %#x", b)
+	csiState.parser.logf("CsiParam::Handle %#x", b)
 
 	nextState, err := csiState.baseState.Handle(b)
 	if nextState != nil || err != nil {
@@ -26,7 +26,7 @@
 }
 
 func (csiState csiParamState) Transition(s state) error {
-	logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
+	csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
 	csiState.baseState.Transition(s)
 
 	switch s {
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
index 2189eb6..1c719db 100644
--- a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
+++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
@@ -5,7 +5,7 @@
 }
 
 func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
-	logger.Infof("escapeIntermediateState::Handle %#x", b)
+	escState.parser.logf("escapeIntermediateState::Handle %#x", b)
 	nextState, err := escState.baseState.Handle(b)
 	if nextState != nil || err != nil {
 		return nextState, err
@@ -24,7 +24,7 @@
 }
 
 func (escState escapeIntermediateState) Transition(s state) error {
-	logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
+	escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
 	escState.baseState.Transition(s)
 
 	switch s {
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go
index 7b1b9ad..6390abd 100644
--- a/vendor/github.com/Azure/go-ansiterm/escape_state.go
+++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go
@@ -5,7 +5,7 @@
 }
 
 func (escState escapeState) Handle(b byte) (s state, e error) {
-	logger.Infof("escapeState::Handle %#x", b)
+	escState.parser.logf("escapeState::Handle %#x", b)
 	nextState, err := escState.baseState.Handle(b)
 	if nextState != nil || err != nil {
 		return nextState, err
@@ -28,7 +28,7 @@
 }
 
 func (escState escapeState) Transition(s state) error {
-	logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name())
+	escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name())
 	escState.baseState.Transition(s)
 
 	switch s {
diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
index 24062d4..593b10a 100644
--- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
+++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
@@ -5,7 +5,7 @@
 }
 
 func (oscState oscStringState) Handle(b byte) (s state, e error) {
-	logger.Infof("OscString::Handle %#x", b)
+	oscState.parser.logf("OscString::Handle %#x", b)
 	nextState, err := oscState.baseState.Handle(b)
 	if nextState != nil || err != nil {
 		return nextState, err
diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go
index 3286a9c..03cec7a 100644
--- a/vendor/github.com/Azure/go-ansiterm/parser.go
+++ b/vendor/github.com/Azure/go-ansiterm/parser.go
@@ -2,14 +2,10 @@
 
 import (
 	"errors"
-	"io/ioutil"
+	"log"
 	"os"
-
-	"github.com/sirupsen/logrus"
 )
 
-var logger *logrus.Logger
-
 type AnsiParser struct {
 	currState          state
 	eventHandler       AnsiEventHandler
@@ -23,50 +19,69 @@
 	ground             state
 	oscString          state
 	stateMap           []state
+
+	logf func(string, ...interface{})
 }
 
-func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser {
-	logFile := ioutil.Discard
+type Option func(*AnsiParser)
 
-	if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
-		logFile, _ = os.Create("ansiParser.log")
+func WithLogf(f func(string, ...interface{})) Option {
+	return func(ap *AnsiParser) {
+		ap.logf = f
 	}
+}
 
-	logger = &logrus.Logger{
-		Out:       logFile,
-		Formatter: new(logrus.TextFormatter),
-		Level:     logrus.InfoLevel,
-	}
-
-	parser := &AnsiParser{
+func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser {
+	ap := &AnsiParser{
 		eventHandler: evtHandler,
 		context:      &ansiContext{},
 	}
-
-	parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}}
-	parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}}
-	parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}}
-	parser.escape = escapeState{baseState{name: "Escape", parser: parser}}
-	parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}}
-	parser.error = errorState{baseState{name: "Error", parser: parser}}
-	parser.ground = groundState{baseState{name: "Ground", parser: parser}}
-	parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}}
-
-	parser.stateMap = []state{
-		parser.csiEntry,
-		parser.csiParam,
-		parser.dcsEntry,
-		parser.escape,
-		parser.escapeIntermediate,
-		parser.error,
-		parser.ground,
-		parser.oscString,
+	for _, o := range opts {
+		o(ap)
 	}
 
-	parser.currState = getState(initialState, parser.stateMap)
+	if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
+		logFile, _ := os.Create("ansiParser.log")
+		logger := log.New(logFile, "", log.LstdFlags)
+		if ap.logf != nil {
+			l := ap.logf
+			ap.logf = func(s string, v ...interface{}) {
+				l(s, v...)
+				logger.Printf(s, v...)
+			}
+		} else {
+			ap.logf = logger.Printf
+		}
+	}
 
-	logger.Infof("CreateParser: parser %p", parser)
-	return parser
+	if ap.logf == nil {
+		ap.logf = func(string, ...interface{}) {}
+	}
+
+	ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
+	ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
+	ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
+	ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
+	ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
+	ap.error = errorState{baseState{name: "Error", parser: ap}}
+	ap.ground = groundState{baseState{name: "Ground", parser: ap}}
+	ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}
+
+	ap.stateMap = []state{
+		ap.csiEntry,
+		ap.csiParam,
+		ap.dcsEntry,
+		ap.escape,
+		ap.escapeIntermediate,
+		ap.error,
+		ap.ground,
+		ap.oscString,
+	}
+
+	ap.currState = getState(initialState, ap.stateMap)
+
+	ap.logf("CreateParser: parser %p", ap)
+	return ap
 }
 
 func getState(name string, states []state) state {
@@ -97,7 +112,7 @@
 	}
 
 	if newState == nil {
-		logger.Warning("newState is nil")
+		ap.logf("WARNING: newState is nil")
 		return errors.New("New state of 'nil' is invalid.")
 	}
 
@@ -111,23 +126,23 @@
 }
 
 func (ap *AnsiParser) changeState(newState state) error {
-	logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
+	ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
 
 	// Exit old state
 	if err := ap.currState.Exit(); err != nil {
-		logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
+		ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
 		return err
 	}
 
 	// Perform transition action
 	if err := ap.currState.Transition(newState); err != nil {
-		logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err)
+		ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err)
 		return err
 	}
 
 	// Enter new state
 	if err := newState.Enter(); err != nil {
-		logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err)
+		ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
 		return err
 	}
 
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
index 8b69a67..de0a1f9 100644
--- a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
+++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
@@ -27,7 +27,6 @@
 		params = append(params, s)
 	}
 
-	logger.Infof("Parsed params: %v with length: %d", params, len(params))
 	return params, nil
 }
 
@@ -37,7 +36,6 @@
 
 func getInt(params []string, dflt int) int {
 	i := getInts(params, 1, dflt)[0]
-	logger.Infof("getInt: %v", i)
 	return i
 }
 
@@ -60,8 +58,6 @@
 		}
 	}
 
-	logger.Infof("getInts: %v", ints)
-
 	return ints
 }
 
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
index 58750a2..0bb5e51 100644
--- a/vendor/github.com/Azure/go-ansiterm/parser_actions.go
+++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
@@ -1,19 +1,15 @@
 package ansiterm
 
-import (
-	"fmt"
-)
-
 func (ap *AnsiParser) collectParam() error {
 	currChar := ap.context.currentChar
-	logger.Infof("collectParam %#x", currChar)
+	ap.logf("collectParam %#x", currChar)
 	ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
 	return nil
 }
 
 func (ap *AnsiParser) collectInter() error {
 	currChar := ap.context.currentChar
-	logger.Infof("collectInter %#x", currChar)
+	ap.logf("collectInter %#x", currChar)
 	ap.context.paramBuffer = append(ap.context.interBuffer, currChar)
 	return nil
 }
@@ -21,8 +17,8 @@
 func (ap *AnsiParser) escDispatch() error {
 	cmd, _ := parseCmd(*ap.context)
 	intermeds := ap.context.interBuffer
-	logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar)
-	logger.Infof("escDispatch: %v(%v)", cmd, intermeds)
+	ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
+	ap.logf("escDispatch: %v(%v)", cmd, intermeds)
 
 	switch cmd {
 	case "D": // IND
@@ -43,8 +39,9 @@
 func (ap *AnsiParser) csiDispatch() error {
 	cmd, _ := parseCmd(*ap.context)
 	params, _ := parseParams(ap.context.paramBuffer)
+	ap.logf("Parsed params: %v with length: %d", params, len(params))
 
-	logger.Infof("csiDispatch: %v(%v)", cmd, params)
+	ap.logf("csiDispatch: %v(%v)", cmd, params)
 
 	switch cmd {
 	case "@":
@@ -102,7 +99,7 @@
 		top, bottom := ints[0], ints[1]
 		return ap.eventHandler.DECSTBM(top, bottom)
 	default:
-		logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context:  %v", cmd, ap.context))
+		ap.logf("ERROR: Unsupported CSI command: '%s', with full context:  %v", cmd, ap.context)
 		return nil
 	}
 
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
index daf2f06..a673279 100644
--- a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
@@ -175,7 +175,7 @@
 
 	fd, err := syscall.GetStdHandle(nFile)
 	if err != nil {
-		panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err))
+		panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
 	}
 
 	return file, uintptr(fd)
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
index 462d92f..6055e33 100644
--- a/vendor/github.com/Azure/go-ansiterm/winterm/api.go
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
@@ -49,17 +49,22 @@
 const (
 	// Console modes
 	// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
-	ENABLE_PROCESSED_INPUT = 0x0001
-	ENABLE_LINE_INPUT      = 0x0002
-	ENABLE_ECHO_INPUT      = 0x0004
-	ENABLE_WINDOW_INPUT    = 0x0008
-	ENABLE_MOUSE_INPUT     = 0x0010
-	ENABLE_INSERT_MODE     = 0x0020
-	ENABLE_QUICK_EDIT_MODE = 0x0040
-	ENABLE_EXTENDED_FLAGS  = 0x0080
+	ENABLE_PROCESSED_INPUT        = 0x0001
+	ENABLE_LINE_INPUT             = 0x0002
+	ENABLE_ECHO_INPUT             = 0x0004
+	ENABLE_WINDOW_INPUT           = 0x0008
+	ENABLE_MOUSE_INPUT            = 0x0010
+	ENABLE_INSERT_MODE            = 0x0020
+	ENABLE_QUICK_EDIT_MODE        = 0x0040
+	ENABLE_EXTENDED_FLAGS         = 0x0080
+	ENABLE_AUTO_POSITION          = 0x0100
+	ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
 
-	ENABLE_PROCESSED_OUTPUT   = 0x0001
-	ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
+	ENABLE_PROCESSED_OUTPUT            = 0x0001
+	ENABLE_WRAP_AT_EOL_OUTPUT          = 0x0002
+	ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
+	DISABLE_NEWLINE_AUTO_RETURN        = 0x0008
+	ENABLE_LVB_GRID_WORLDWIDE          = 0x0010
 
 	// Character attributes
 	// Note:
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
index f015723..3ee06ea 100644
--- a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
@@ -34,7 +34,7 @@
 	if err != nil {
 		return err
 	}
-	logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y)
+	h.logf("Cursor position set: (%d, %d)", position.X, position.Y)
 	return err
 }
 
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
index 706d270..2d27fa1 100644
--- a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
@@ -50,8 +50,8 @@
 
 // scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
 func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
-	logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
-	logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
+	h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
+	h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
 
 	// Copy from and clip to the scroll region (full buffer width)
 	scrollRect := SMALL_RECT{
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
index 48998bb..2d40fb7 100644
--- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
+++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
@@ -4,16 +4,13 @@
 
 import (
 	"bytes"
-	"io/ioutil"
+	"log"
 	"os"
 	"strconv"
 
 	"github.com/Azure/go-ansiterm"
-	"github.com/sirupsen/logrus"
 )
 
-var logger *logrus.Logger
-
 type windowsAnsiEventHandler struct {
 	fd             uintptr
 	file           *os.File
@@ -28,32 +25,52 @@
 	marginByte     byte
 	curInfo        *CONSOLE_SCREEN_BUFFER_INFO
 	curPos         COORD
+	logf           func(string, ...interface{})
 }
 
-func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler {
-	logFile := ioutil.Discard
+type Option func(*windowsAnsiEventHandler)
 
-	if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
-		logFile, _ = os.Create("winEventHandler.log")
+func WithLogf(f func(string, ...interface{})) Option {
+	return func(w *windowsAnsiEventHandler) {
+		w.logf = f
 	}
+}
 
-	logger = &logrus.Logger{
-		Out:       logFile,
-		Formatter: new(logrus.TextFormatter),
-		Level:     logrus.DebugLevel,
-	}
-
+func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler {
 	infoReset, err := GetConsoleScreenBufferInfo(fd)
 	if err != nil {
 		return nil
 	}
 
-	return &windowsAnsiEventHandler{
+	h := &windowsAnsiEventHandler{
 		fd:         fd,
 		file:       file,
 		infoReset:  infoReset,
 		attributes: infoReset.Attributes,
 	}
+	for _, o := range opts {
+		o(h)
+	}
+
+	if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
+		logFile, _ := os.Create("winEventHandler.log")
+		logger := log.New(logFile, "", log.LstdFlags)
+		if h.logf != nil {
+			l := h.logf
+			h.logf = func(s string, v ...interface{}) {
+				l(s, v...)
+				logger.Printf(s, v...)
+			}
+		} else {
+			h.logf = logger.Printf
+		}
+	}
+
+	if h.logf == nil {
+		h.logf = func(string, ...interface{}) {}
+	}
+
+	return h
 }
 
 type scrollRegion struct {
@@ -96,7 +113,7 @@
 		if err := h.Flush(); err != nil {
 			return false, err
 		}
-		logger.Info("Simulating LF inside scroll region")
+		h.logf("Simulating LF inside scroll region")
 		if err := h.scrollUp(1); err != nil {
 			return false, err
 		}
@@ -119,7 +136,7 @@
 	} else {
 		// The cursor is at the bottom of the screen but outside the scroll
 		// region. Skip the LF.
-		logger.Info("Simulating LF outside scroll region")
+		h.logf("Simulating LF outside scroll region")
 		if includeCR {
 			if err := h.Flush(); err != nil {
 				return false, err
@@ -151,7 +168,7 @@
 			if err := h.Flush(); err != nil {
 				return err
 			}
-			logger.Info("Resetting cursor position for LF without CR")
+			h.logf("Resetting cursor position for LF without CR")
 			if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
 				return err
 			}
@@ -186,7 +203,7 @@
 func (h *windowsAnsiEventHandler) Execute(b byte) error {
 	switch b {
 	case ansiterm.ANSI_TAB:
-		logger.Info("Execute(TAB)")
+		h.logf("Execute(TAB)")
 		// Move to the next tab stop, but preserve auto-wrap if already set.
 		if !h.wrapNext {
 			pos, info, err := h.getCurrentInfo()
@@ -269,7 +286,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)})
+	h.logf("CUU: [%v]", []string{strconv.Itoa(param)})
 	h.clearWrap()
 	return h.moveCursorVertical(-param)
 }
@@ -278,7 +295,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)})
+	h.logf("CUD: [%v]", []string{strconv.Itoa(param)})
 	h.clearWrap()
 	return h.moveCursorVertical(param)
 }
@@ -287,7 +304,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)})
+	h.logf("CUF: [%v]", []string{strconv.Itoa(param)})
 	h.clearWrap()
 	return h.moveCursorHorizontal(param)
 }
@@ -296,7 +313,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)})
+	h.logf("CUB: [%v]", []string{strconv.Itoa(param)})
 	h.clearWrap()
 	return h.moveCursorHorizontal(-param)
 }
@@ -305,7 +322,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)})
+	h.logf("CNL: [%v]", []string{strconv.Itoa(param)})
 	h.clearWrap()
 	return h.moveCursorLine(param)
 }
@@ -314,7 +331,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)})
+	h.logf("CPL: [%v]", []string{strconv.Itoa(param)})
 	h.clearWrap()
 	return h.moveCursorLine(-param)
 }
@@ -323,7 +340,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)})
+	h.logf("CHA: [%v]", []string{strconv.Itoa(param)})
 	h.clearWrap()
 	return h.moveCursorColumn(param)
 }
@@ -332,7 +349,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("VPA: [[%d]]", param)
+	h.logf("VPA: [[%d]]", param)
 	h.clearWrap()
 	info, err := GetConsoleScreenBufferInfo(h.fd)
 	if err != nil {
@@ -348,7 +365,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("CUP: [[%d %d]]", row, col)
+	h.logf("CUP: [[%d %d]]", row, col)
 	h.clearWrap()
 	info, err := GetConsoleScreenBufferInfo(h.fd)
 	if err != nil {
@@ -364,7 +381,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("HVP: [[%d %d]]", row, col)
+	h.logf("HVP: [[%d %d]]", row, col)
 	h.clearWrap()
 	return h.CUP(row, col)
 }
@@ -373,7 +390,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
+	h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
 	h.clearWrap()
 	return nil
 }
@@ -382,7 +399,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)})
+	h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)})
 	h.clearWrap()
 	h.originMode = enable
 	return h.CUP(1, 1)
@@ -392,7 +409,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
+	h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
 	h.clearWrap()
 	if err := h.ED(2); err != nil {
 		return err
@@ -407,7 +424,7 @@
 	}
 	if info.Size.X < targetWidth {
 		if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
-			logger.Info("set buffer failed:", err)
+			h.logf("set buffer failed: %v", err)
 			return err
 		}
 	}
@@ -415,12 +432,12 @@
 	window.Left = 0
 	window.Right = targetWidth - 1
 	if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
-		logger.Info("set window failed:", err)
+		h.logf("set window failed: %v", err)
 		return err
 	}
 	if info.Size.X > targetWidth {
 		if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
-			logger.Info("set buffer failed:", err)
+			h.logf("set buffer failed: %v", err)
 			return err
 		}
 	}
@@ -431,7 +448,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("ED: [%v]", []string{strconv.Itoa(param)})
+	h.logf("ED: [%v]", []string{strconv.Itoa(param)})
 	h.clearWrap()
 
 	// [J  -- Erases from the cursor to the end of the screen, including the cursor position.
@@ -490,7 +507,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("EL: [%v]", strconv.Itoa(param))
+	h.logf("EL: [%v]", strconv.Itoa(param))
 	h.clearWrap()
 
 	// [K  -- Erases from the cursor to the end of the line, including the cursor position.
@@ -531,7 +548,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("IL: [%v]", strconv.Itoa(param))
+	h.logf("IL: [%v]", strconv.Itoa(param))
 	h.clearWrap()
 	return h.insertLines(param)
 }
@@ -540,7 +557,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("DL: [%v]", strconv.Itoa(param))
+	h.logf("DL: [%v]", strconv.Itoa(param))
 	h.clearWrap()
 	return h.deleteLines(param)
 }
@@ -549,7 +566,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("ICH: [%v]", strconv.Itoa(param))
+	h.logf("ICH: [%v]", strconv.Itoa(param))
 	h.clearWrap()
 	return h.insertCharacters(param)
 }
@@ -558,7 +575,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("DCH: [%v]", strconv.Itoa(param))
+	h.logf("DCH: [%v]", strconv.Itoa(param))
 	h.clearWrap()
 	return h.deleteCharacters(param)
 }
@@ -572,7 +589,7 @@
 		strings = append(strings, strconv.Itoa(v))
 	}
 
-	logger.Infof("SGR: [%v]", strings)
+	h.logf("SGR: [%v]", strings)
 
 	if len(params) <= 0 {
 		h.attributes = h.infoReset.Attributes
@@ -606,7 +623,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("SU: [%v]", []string{strconv.Itoa(param)})
+	h.logf("SU: [%v]", []string{strconv.Itoa(param)})
 	h.clearWrap()
 	return h.scrollUp(param)
 }
@@ -615,13 +632,13 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("SD: [%v]", []string{strconv.Itoa(param)})
+	h.logf("SD: [%v]", []string{strconv.Itoa(param)})
 	h.clearWrap()
 	return h.scrollDown(param)
 }
 
 func (h *windowsAnsiEventHandler) DA(params []string) error {
-	logger.Infof("DA: [%v]", params)
+	h.logf("DA: [%v]", params)
 	// DA cannot be implemented because it must send data on the VT100 input stream,
 	// which is not available to go-ansiterm.
 	return nil
@@ -631,7 +648,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Infof("DECSTBM: [%d, %d]", top, bottom)
+	h.logf("DECSTBM: [%d, %d]", top, bottom)
 
 	// Windows is 0 indexed, Linux is 1 indexed
 	h.sr.top = int16(top - 1)
@@ -646,7 +663,7 @@
 	if err := h.Flush(); err != nil {
 		return err
 	}
-	logger.Info("RI: []")
+	h.logf("RI: []")
 	h.clearWrap()
 
 	info, err := GetConsoleScreenBufferInfo(h.fd)
@@ -663,21 +680,21 @@
 }
 
 func (h *windowsAnsiEventHandler) IND() error {
-	logger.Info("IND: []")
+	h.logf("IND: []")
 	return h.executeLF()
 }
 
 func (h *windowsAnsiEventHandler) Flush() error {
 	h.curInfo = nil
 	if h.buffer.Len() > 0 {
-		logger.Infof("Flush: [%s]", h.buffer.Bytes())
+		h.logf("Flush: [%s]", h.buffer.Bytes())
 		if _, err := h.buffer.WriteTo(h.file); err != nil {
 			return err
 		}
 	}
 
 	if h.wrapNext && !h.drewMarginByte {
-		logger.Infof("Flush: drawing margin byte '%c'", h.marginByte)
+		h.logf("Flush: drawing margin byte '%c'", h.marginByte)
 
 		info, err := GetConsoleScreenBufferInfo(h.fd)
 		if err != nil {
diff --git a/vendor/github.com/Graylog2/go-gelf/README.md b/vendor/github.com/Graylog2/go-gelf/README.md
index 4900a35..81d5612 100644
--- a/vendor/github.com/Graylog2/go-gelf/README.md
+++ b/vendor/github.com/Graylog2/go-gelf/README.md
@@ -1,17 +1,53 @@
-go-gelf - GELF library and writer for Go
+go-gelf - GELF Library and Writer for Go
 ========================================
 
-GELF is graylog2's UDP logging format.  This library provides an API
-that applications can use to log messages directly to a graylog2
-server, along with an `io.Writer` that can be use to redirect the
-standard library's log messages (or `os.Stdout`), to a graylog2 server.
+[GELF] (Graylog Extended Log Format) is an application-level logging
+protocol that avoids many of the shortcomings of [syslog]. While it
+can be run over any stream or datagram transport protocol, it has
+special support ([chunking]) to allow long messages to be split over
+multiple datagrams.
+
+Versions
+--------
+
+In order to enable versioning of this package with Go, this project
+is using GoPkg.in. The default branch of this project will be v1
+for some time to prevent breaking clients. We encourage all projects
+to change their imports to the new GoPkg.in URIs as soon as possible.
+
+To see up-to-date code, make sure to switch to the master branch.
+
+v1.0.0
+------
+
+This implementation currently supports UDP and TCP as a transport
+protocol. TLS is unsupported.
+
+The library provides an API that applications can use to log messages
+directly to a Graylog server and an `io.Writer` that can be used to
+redirect the standard library's log messages (`os.Stdout`) to a
+Graylog server.
+
+[GELF]: http://docs.graylog.org/en/2.2/pages/gelf.html
+[syslog]: https://tools.ietf.org/html/rfc5424
+[chunking]: http://docs.graylog.org/en/2.2/pages/gelf.html#chunked-gelf
+
 
 Installing
 ----------
 
 go-gelf is go get-able:
 
-	go get github.com/Graylog2/go-gelf/gelf
+    go get gopkg.in/Graylog2/go-gelf.v1/gelf
+
+    or
+
+    go get github.com/Graylog2/go-gelf/gelf
+
+This will get you version 1.0.0, with only UDP support and legacy API.
+Newer versions are available through GoPkg.in:
+
+    go get gopkg.in/Graylog2/go-gelf.v2/gelf
 
 Usage
 -----
@@ -21,50 +57,55 @@
 By using an `io.MultiWriter`, we can log to both stdout and graylog -
 giving us both centralized and local logs.  (Redundancy is nice).
 
-	package main
+```golang
+package main
 
-	import (
-		"flag"
-		"github.com/Graylog2/go-gelf/gelf"
-		"io"
-		"log"
-		"os"
-	)
+import (
+  "flag"
+  "gopkg.in/Graylog2/go-gelf.v2/gelf"
+  "io"
+  "log"
+  "os"
+)
 
-	func main() {
-		var graylogAddr string
+func main() {
+  var graylogAddr string
 
-		flag.StringVar(&graylogAddr, "graylog", "", "graylog server addr")
-		flag.Parse()
+  flag.StringVar(&graylogAddr, "graylog", "", "graylog server addr")
+  flag.Parse()
 
-		if graylogAddr != "" {
-			gelfWriter, err := gelf.NewWriter(graylogAddr)
-			if err != nil {
-				log.Fatalf("gelf.NewWriter: %s", err)
-			}
-			// log to both stderr and graylog2
-			log.SetOutput(io.MultiWriter(os.Stderr, gelfWriter))
-			log.Printf("logging to stderr & graylog2@'%s'", graylogAddr)
-		}
+  if graylogAddr != "" {
+          // If using UDP
+    gelfWriter, err := gelf.NewUDPWriter(graylogAddr)
+          // If using TCP
+          //gelfWriter, err := gelf.NewTCPWriter(graylogAddr)
+    if err != nil {
+      log.Fatalf("gelf.NewWriter: %s", err)
+    }
+    // log to both stderr and graylog2
+    log.SetOutput(io.MultiWriter(os.Stderr, gelfWriter))
+    log.Printf("logging to stderr & graylog2@'%s'", graylogAddr)
+  }
 
-		// From here on out, any calls to log.Print* functions
-		// will appear on stdout, and be sent over UDP to the
-		// specified Graylog2 server.
+  // From here on out, any calls to log.Print* functions
+  // will appear on stdout, and be sent over UDP or TCP to the
+  // specified Graylog2 server.
 
-		log.Printf("Hello gray World")
+  log.Printf("Hello gray World")
 
-		// ...
-	}
-
+  // ...
+}
+```
 The above program can be invoked as:
 
-	go run test.go -graylog=localhost:12201
+    go run test.go -graylog=localhost:12201
 
-Because GELF messages are sent over UDP, graylog server availability
-doesn't impact application performance or response time.  There is a
-small, fixed overhead per log call, regardless of whether the target
+When using UDP, messages may be dropped or re-ordered. However, Graylog
+server availability will not impact application performance; there is
+a small, fixed overhead per log call regardless of whether the target
 server is reachable or not.
 
+
 To Do
 -----
 
diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/message.go b/vendor/github.com/Graylog2/go-gelf/gelf/message.go
new file mode 100644
index 0000000..4e5a05e
--- /dev/null
+++ b/vendor/github.com/Graylog2/go-gelf/gelf/message.go
@@ -0,0 +1,147 @@
+package gelf
+
+import (
+	"bytes"
+	"encoding/json"
+	"time"
+)
+
+// Message represents the contents of the GELF message.  It is gzipped
+// before sending.
+type Message struct {
+	Version  string                 `json:"version"`
+	Host     string                 `json:"host"`
+	Short    string                 `json:"short_message"`
+	Full     string                 `json:"full_message,omitempty"`
+	TimeUnix float64                `json:"timestamp"`
+	Level    int32                  `json:"level,omitempty"`
+	Facility string                 `json:"facility,omitempty"`
+	Extra    map[string]interface{} `json:"-"`
+	RawExtra json.RawMessage        `json:"-"`
+}
+
+// Syslog severity levels
+const (
+	LOG_EMERG   = 0
+	LOG_ALERT   = 1
+	LOG_CRIT    = 2
+	LOG_ERR     = 3
+	LOG_WARNING = 4
+	LOG_NOTICE  = 5
+	LOG_INFO    = 6
+	LOG_DEBUG   = 7
+)
+
+func (m *Message) MarshalJSONBuf(buf *bytes.Buffer) error {
+	b, err := json.Marshal(m)
+	if err != nil {
+		return err
+	}
+	// write up until the final }
+	if _, err = buf.Write(b[:len(b)-1]); err != nil {
+		return err
+	}
+	if len(m.Extra) > 0 {
+		eb, err := json.Marshal(m.Extra)
+		if err != nil {
+			return err
+		}
+		// merge serialized message + serialized extra map
+		if err = buf.WriteByte(','); err != nil {
+			return err
+		}
+		// write serialized extra bytes, without enclosing braces
+		if _, err = buf.Write(eb[1 : len(eb)-1]); err != nil {
+			return err
+		}
+	}
+
+	if len(m.RawExtra) > 0 {
+		if err := buf.WriteByte(','); err != nil {
+			return err
+		}
+
+		// write serialized extra bytes, without enclosing braces
+		if _, err = buf.Write(m.RawExtra[1 : len(m.RawExtra)-1]); err != nil {
+			return err
+		}
+	}
+
+	// write final closing brace
+	return buf.WriteByte('}')
+}
+
+func (m *Message) UnmarshalJSON(data []byte) error {
+	i := make(map[string]interface{}, 16)
+	if err := json.Unmarshal(data, &i); err != nil {
+		return err
+	}
+	for k, v := range i {
+		if k[0] == '_' {
+			if m.Extra == nil {
+				m.Extra = make(map[string]interface{}, 1)
+			}
+			m.Extra[k] = v
+			continue
+		}
+		switch k {
+		case "version":
+			m.Version = v.(string)
+		case "host":
+			m.Host = v.(string)
+		case "short_message":
+			m.Short = v.(string)
+		case "full_message":
+			m.Full = v.(string)
+		case "timestamp":
+			m.TimeUnix = v.(float64)
+		case "level":
+			m.Level = int32(v.(float64))
+		case "facility":
+			m.Facility = v.(string)
+		}
+	}
+	return nil
+}
+
+func (m *Message) toBytes() (messageBytes []byte, err error) {
+	buf := newBuffer()
+	defer bufPool.Put(buf)
+	if err = m.MarshalJSONBuf(buf); err != nil {
+		return nil, err
+	}
+	messageBytes = buf.Bytes()
+	return messageBytes, nil
+}
+
+func constructMessage(p []byte, hostname string, facility string, file string, line int) (m *Message) {
+	// remove trailing and leading whitespace
+	p = bytes.TrimSpace(p)
+
+	// If there are newlines in the message, use the first line
+	// for the short message and set the full message to the
+	// original input.  If the input has no newlines, stick the
+	// whole thing in Short.
+	short := p
+	full := []byte("")
+	if i := bytes.IndexRune(p, '\n'); i > 0 {
+		short = p[:i]
+		full = p
+	}
+
+	m = &Message{
+		Version:  "1.1",
+		Host:     hostname,
+		Short:    string(short),
+		Full:     string(full),
+		TimeUnix: float64(time.Now().Unix()),
+		Level:    6, // info
+		Facility: facility,
+		Extra: map[string]interface{}{
+			"_file": file,
+			"_line": line,
+		},
+	}
+
+	return m
+}
diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go b/vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go
new file mode 100644
index 0000000..8f22c9a
--- /dev/null
+++ b/vendor/github.com/Graylog2/go-gelf/gelf/tcpreader.go
@@ -0,0 +1,93 @@
+package gelf
+
+import (
+	"bufio"
+	"encoding/json"
+	"fmt"
+	"net"
+)
+
+type TCPReader struct {
+	listener *net.TCPListener
+	conn     net.Conn
+	messages chan []byte
+}
+
+func newTCPReader(addr string) (*TCPReader, chan string, error) {
+	var err error
+	tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
+	if err != nil {
+		return nil, nil, fmt.Errorf("ResolveTCPAddr('%s'): %s", addr, err)
+	}
+
+	listener, err := net.ListenTCP("tcp", tcpAddr)
+	if err != nil {
+		return nil, nil, fmt.Errorf("ListenTCP: %s", err)
+	}
+
+	r := &TCPReader{
+		listener: listener,
+		messages: make(chan []byte, 100), // Make a buffered channel with at most 100 messages
+	}
+
+	signal := make(chan string, 1)
+
+	go r.listenUntilCloseSignal(signal)
+
+	return r, signal, nil
+}
+
+func (r *TCPReader) listenUntilCloseSignal(signal chan string) {
+	defer func() { signal <- "done" }()
+	defer r.listener.Close()
+	for {
+		conn, err := r.listener.Accept()
+		if err != nil {
+			break
+		}
+		go handleConnection(conn, r.messages)
+		select {
+		case sig := <-signal:
+			if sig == "stop" {
+				break
+			}
+		default:
+		}
+	}
+}
+
+func (r *TCPReader) addr() string {
+	return r.listener.Addr().String()
+}
+
+func handleConnection(conn net.Conn, messages chan<- []byte) {
+	defer conn.Close()
+	reader := bufio.NewReader(conn)
+
+	var b []byte
+	var err error
+
+	for {
+		if b, err = reader.ReadBytes(0); err != nil {
+			continue
+		}
+		if len(b) > 0 {
+			messages <- b
+		}
+	}
+}
+
+func (r *TCPReader) readMessage() (*Message, error) {
+	b := <-r.messages
+
+	var msg Message
+	if err := json.Unmarshal(b[:len(b)-1], &msg); err != nil {
+		return nil, fmt.Errorf("json.Unmarshal: %s", err)
+	}
+
+	return &msg, nil
+}
+
+func (r *TCPReader) Close() {
+	r.listener.Close()
+}
diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go b/vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go
new file mode 100644
index 0000000..ab95cbc
--- /dev/null
+++ b/vendor/github.com/Graylog2/go-gelf/gelf/tcpwriter.go
@@ -0,0 +1,97 @@
+package gelf
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"sync"
+	"time"
+)
+
+const (
+	DefaultMaxReconnect   = 3
+	DefaultReconnectDelay = 1
+)
+
+type TCPWriter struct {
+	GelfWriter
+	mu             sync.Mutex
+	MaxReconnect   int
+	ReconnectDelay time.Duration
+}
+
+func NewTCPWriter(addr string) (*TCPWriter, error) {
+	var err error
+	w := new(TCPWriter)
+	w.MaxReconnect = DefaultMaxReconnect
+	w.ReconnectDelay = DefaultReconnectDelay
+	w.proto = "tcp"
+	w.addr = addr
+
+	if w.conn, err = net.Dial("tcp", addr); err != nil {
+		return nil, err
+	}
+	if w.hostname, err = os.Hostname(); err != nil {
+		return nil, err
+	}
+
+	return w, nil
+}
+
+// WriteMessage sends the specified message to the GELF server
+// specified in the call to New().  It assumes all the fields are
+// filled out appropriately.  In general, clients will want to use
+// Write, rather than WriteMessage.
+func (w *TCPWriter) WriteMessage(m *Message) (err error) {
+	messageBytes, err := m.toBytes()
+	if err != nil {
+		return err
+	}
+
+	messageBytes = append(messageBytes, 0)
+
+	n, err := w.writeToSocketWithReconnectAttempts(messageBytes)
+	if err != nil {
+		return err
+	}
+	if n != len(messageBytes) {
+		return fmt.Errorf("bad write (%d/%d)", n, len(messageBytes))
+	}
+
+	return nil
+}
+
+func (w *TCPWriter) Write(p []byte) (n int, err error) {
+	file, line := getCallerIgnoringLogMulti(1)
+
+	m := constructMessage(p, w.hostname, w.Facility, file, line)
+
+	if err = w.WriteMessage(m); err != nil {
+		return 0, err
+	}
+
+	return len(p), nil
+}
+
+func (w *TCPWriter) writeToSocketWithReconnectAttempts(zBytes []byte) (n int, err error) {
+	var errConn error
+
+	w.mu.Lock()
+	for i := 0; n <= w.MaxReconnect; i++ {
+		errConn = nil
+
+		n, err = w.conn.Write(zBytes)
+		if err != nil {
+			time.Sleep(w.ReconnectDelay * time.Second)
+			w.conn, errConn = net.Dial("tcp", w.addr)
+		} else {
+			break
+		}
+	}
+	w.mu.Unlock()
+
+	if errConn != nil {
+		return 0, fmt.Errorf("Write Failed: %s\nReconnection failed: %s", err, errConn)
+	}
+	return n, nil
+}
diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/udpwriter.go b/vendor/github.com/Graylog2/go-gelf/gelf/udpwriter.go
new file mode 100644
index 0000000..23bbd5e
--- /dev/null
+++ b/vendor/github.com/Graylog2/go-gelf/gelf/udpwriter.go
@@ -0,0 +1,231 @@
+// Copyright 2012 SocialCode. All rights reserved.
+// Use of this source code is governed by the MIT
+// license that can be found in the LICENSE file.
+
+package gelf
+
+import (
+	"bytes"
+	"compress/flate"
+	"compress/gzip"
+	"compress/zlib"
+	"crypto/rand"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"path"
+	"sync"
+)
+
+type UDPWriter struct {
+	GelfWriter
+	CompressionLevel int // one of the consts from compress/flate
+	CompressionType  CompressType
+}
+
+// What compression type the writer should use when sending messages
+// to the graylog2 server
+type CompressType int
+
+const (
+	CompressGzip CompressType = iota
+	CompressZlib
+	CompressNone
+)
+
+// Used to control GELF chunking.  Should be less than (MTU - len(UDP
+// header)).
+//
+// TODO: generate dynamically using Path MTU Discovery?
+const (
+	ChunkSize        = 1420
+	chunkedHeaderLen = 12
+	chunkedDataLen   = ChunkSize - chunkedHeaderLen
+)
+
+var (
+	magicChunked = []byte{0x1e, 0x0f}
+	magicZlib    = []byte{0x78}
+	magicGzip    = []byte{0x1f, 0x8b}
+)
+
+// numChunks returns the number of GELF chunks necessary to transmit
+// the given compressed buffer.
+func numChunks(b []byte) int {
+	lenB := len(b)
+	if lenB <= ChunkSize {
+		return 1
+	}
+	return len(b)/chunkedDataLen + 1
+}
+
+// New returns a new GELF Writer.  This writer can be used to send the
+// output of the standard Go log functions to a central GELF server by
+// passing it to log.SetOutput()
+func NewUDPWriter(addr string) (*UDPWriter, error) {
+	var err error
+	w := new(UDPWriter)
+	w.CompressionLevel = flate.BestSpeed
+
+	if w.conn, err = net.Dial("udp", addr); err != nil {
+		return nil, err
+	}
+	if w.hostname, err = os.Hostname(); err != nil {
+		return nil, err
+	}
+
+	w.Facility = path.Base(os.Args[0])
+
+	return w, nil
+}
+
+// writes the gzip compressed byte array to the connection as a series
+// of GELF chunked messages.  The format is documented at
+// http://docs.graylog.org/en/2.1/pages/gelf.html as:
+//
+//     2-byte magic (0x1e 0x0f), 8 byte id, 1 byte sequence id, 1 byte
+//     total, chunk-data
+func (w *GelfWriter) writeChunked(zBytes []byte) (err error) {
+	b := make([]byte, 0, ChunkSize)
+	buf := bytes.NewBuffer(b)
+	nChunksI := numChunks(zBytes)
+	if nChunksI > 128 {
+		return fmt.Errorf("msg too large, would need %d chunks", nChunksI)
+	}
+	nChunks := uint8(nChunksI)
+	// use urandom to get a unique message id
+	msgId := make([]byte, 8)
+	n, err := io.ReadFull(rand.Reader, msgId)
+	if err != nil || n != 8 {
+		return fmt.Errorf("rand.Reader: %d/%s", n, err)
+	}
+
+	bytesLeft := len(zBytes)
+	for i := uint8(0); i < nChunks; i++ {
+		buf.Reset()
+		// manually write header.  Don't care about
+		// host/network byte order, because the spec only
+		// deals in individual bytes.
+		buf.Write(magicChunked) //magic
+		buf.Write(msgId)
+		buf.WriteByte(i)
+		buf.WriteByte(nChunks)
+		// slice out our chunk from zBytes
+		chunkLen := chunkedDataLen
+		if chunkLen > bytesLeft {
+			chunkLen = bytesLeft
+		}
+		off := int(i) * chunkedDataLen
+		chunk := zBytes[off : off+chunkLen]
+		buf.Write(chunk)
+
+		// write this chunk, and make sure the write was good
+		n, err := w.conn.Write(buf.Bytes())
+		if err != nil {
+			return fmt.Errorf("Write (chunk %d/%d): %s", i,
+				nChunks, err)
+		}
+		if n != len(buf.Bytes()) {
+			return fmt.Errorf("Write len: (chunk %d/%d) (%d/%d)",
+				i, nChunks, n, len(buf.Bytes()))
+		}
+
+		bytesLeft -= chunkLen
+	}
+
+	if bytesLeft != 0 {
+		return fmt.Errorf("error: %d bytes left after sending", bytesLeft)
+	}
+	return nil
+}
+
+// 1k bytes buffer by default
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		return bytes.NewBuffer(make([]byte, 0, 1024))
+	},
+}
+
+func newBuffer() *bytes.Buffer {
+	b := bufPool.Get().(*bytes.Buffer)
+	if b != nil {
+		b.Reset()
+		return b
+	}
+	return bytes.NewBuffer(nil)
+}
+
+// WriteMessage sends the specified message to the GELF server
+// specified in the call to New().  It assumes all the fields are
+// filled out appropriately.  In general, clients will want to use
+// Write, rather than WriteMessage.
+func (w *UDPWriter) WriteMessage(m *Message) (err error) {
+	mBuf := newBuffer()
+	defer bufPool.Put(mBuf)
+	if err = m.MarshalJSONBuf(mBuf); err != nil {
+		return err
+	}
+	mBytes := mBuf.Bytes()
+
+	var (
+		zBuf   *bytes.Buffer
+		zBytes []byte
+	)
+
+	var zw io.WriteCloser
+	switch w.CompressionType {
+	case CompressGzip:
+		zBuf = newBuffer()
+		defer bufPool.Put(zBuf)
+		zw, err = gzip.NewWriterLevel(zBuf, w.CompressionLevel)
+	case CompressZlib:
+		zBuf = newBuffer()
+		defer bufPool.Put(zBuf)
+		zw, err = zlib.NewWriterLevel(zBuf, w.CompressionLevel)
+	case CompressNone:
+		zBytes = mBytes
+	default:
+		panic(fmt.Sprintf("unknown compression type %d",
+			w.CompressionType))
+	}
+	if zw != nil {
+		if err != nil {
+			return
+		}
+		if _, err = zw.Write(mBytes); err != nil {
+			zw.Close()
+			return
+		}
+		zw.Close()
+		zBytes = zBuf.Bytes()
+	}
+
+	if numChunks(zBytes) > 1 {
+		return w.writeChunked(zBytes)
+	}
+	n, err := w.conn.Write(zBytes)
+	if err != nil {
+		return
+	}
+	if n != len(zBytes) {
+		return fmt.Errorf("bad write (%d/%d)", n, len(zBytes))
+	}
+
+	return nil
+}
+
+// Write encodes the given string in a GELF message and sends it to
+// the server specified in New().
+func (w *UDPWriter) Write(p []byte) (n int, err error) {
+	// 1 for the function that called us.
+	file, line := getCallerIgnoringLogMulti(1)
+
+	m := constructMessage(p, w.hostname, w.Facility, file, line)
+
+	if err = w.WriteMessage(m); err != nil {
+		return 0, err
+	}
+
+	return len(p), nil
+}
diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/utils.go b/vendor/github.com/Graylog2/go-gelf/gelf/utils.go
new file mode 100644
index 0000000..6f1c9f7
--- /dev/null
+++ b/vendor/github.com/Graylog2/go-gelf/gelf/utils.go
@@ -0,0 +1,41 @@
+package gelf
+
+import (
+	"runtime"
+	"strings"
+)
+
+// getCaller returns the filename and the line info of a function
+// further down in the call stack.  Passing 0 in as callDepth would
+// return info on the function calling getCallerIgnoringLog, 1 the
+// parent function, and so on.  Any suffixes passed to getCaller are
+// path fragments like "/pkg/log/log.go", and functions in the call
+// stack from that file are ignored.
+func getCaller(callDepth int, suffixesToIgnore ...string) (file string, line int) {
+	// bump by 1 to ignore the getCaller (this) stackframe
+	callDepth++
+outer:
+	for {
+		var ok bool
+		_, file, line, ok = runtime.Caller(callDepth)
+		if !ok {
+			file = "???"
+			line = 0
+			break
+		}
+
+		for _, s := range suffixesToIgnore {
+			if strings.HasSuffix(file, s) {
+				callDepth++
+				continue outer
+			}
+		}
+		break
+	}
+	return
+}
+
+func getCallerIgnoringLogMulti(callDepth int) (string, int) {
+	// the +1 is to ignore this (getCallerIgnoringLogMulti) frame
+	return getCaller(callDepth+1, "/pkg/log/log.go", "/pkg/io/multi.go")
+}
diff --git a/vendor/github.com/Graylog2/go-gelf/gelf/writer.go b/vendor/github.com/Graylog2/go-gelf/gelf/writer.go
index 38e6944..93c3692 100644
--- a/vendor/github.com/Graylog2/go-gelf/gelf/writer.go
+++ b/vendor/github.com/Graylog2/go-gelf/gelf/writer.go
@@ -5,414 +5,27 @@
 package gelf
 
 import (
-	"bytes"
-	"compress/flate"
-	"compress/gzip"
-	"compress/zlib"
-	"crypto/rand"
-	"encoding/json"
-	"fmt"
-	"io"
 	"net"
-	"os"
-	"path"
-	"runtime"
-	"strings"
-	"sync"
-	"time"
 )
 
+type Writer interface {
+	Close() error
+	Write([]byte) (int, error)
+	WriteMessage(*Message) error
+}
+
 // Writer implements io.Writer and is used to send both discrete
 // messages to a graylog2 server, or data from a stream-oriented
 // interface (like the functions in log).
-type Writer struct {
-	mu               sync.Mutex
-	conn             net.Conn
-	hostname         string
-	Facility         string // defaults to current process name
-	CompressionLevel int    // one of the consts from compress/flate
-	CompressionType  CompressType
-}
-
-// What compression type the writer should use when sending messages
-// to the graylog2 server
-type CompressType int
-
-const (
-	CompressGzip CompressType = iota
-	CompressZlib
-	CompressNone
-)
-
-// Message represents the contents of the GELF message.  It is gzipped
-// before sending.
-type Message struct {
-	Version  string                 `json:"version"`
-	Host     string                 `json:"host"`
-	Short    string                 `json:"short_message"`
-	Full     string                 `json:"full_message,omitempty"`
-	TimeUnix float64                `json:"timestamp"`
-	Level    int32                  `json:"level,omitempty"`
-	Facility string                 `json:"facility,omitempty"`
-	Extra    map[string]interface{} `json:"-"`
-	RawExtra json.RawMessage        `json:"-"`
-}
-
-// Used to control GELF chunking.  Should be less than (MTU - len(UDP
-// header)).
-//
-// TODO: generate dynamically using Path MTU Discovery?
-const (
-	ChunkSize        = 1420
-	chunkedHeaderLen = 12
-	chunkedDataLen   = ChunkSize - chunkedHeaderLen
-)
-
-var (
-	magicChunked = []byte{0x1e, 0x0f}
-	magicZlib    = []byte{0x78}
-	magicGzip    = []byte{0x1f, 0x8b}
-)
-
-// Syslog severity levels
-const (
-	LOG_EMERG   = int32(0)
-	LOG_ALERT   = int32(1)
-	LOG_CRIT    = int32(2)
-	LOG_ERR     = int32(3)
-	LOG_WARNING = int32(4)
-	LOG_NOTICE  = int32(5)
-	LOG_INFO    = int32(6)
-	LOG_DEBUG   = int32(7)
-)
-
-// numChunks returns the number of GELF chunks necessary to transmit
-// the given compressed buffer.
-func numChunks(b []byte) int {
-	lenB := len(b)
-	if lenB <= ChunkSize {
-		return 1
-	}
-	return len(b)/chunkedDataLen + 1
-}
-
-// New returns a new GELF Writer.  This writer can be used to send the
-// output of the standard Go log functions to a central GELF server by
-// passing it to log.SetOutput()
-func NewWriter(addr string) (*Writer, error) {
-	var err error
-	w := new(Writer)
-	w.CompressionLevel = flate.BestSpeed
-
-	if w.conn, err = net.Dial("udp", addr); err != nil {
-		return nil, err
-	}
-	if w.hostname, err = os.Hostname(); err != nil {
-		return nil, err
-	}
-
-	w.Facility = path.Base(os.Args[0])
-
-	return w, nil
-}
-
-// writes the gzip compressed byte array to the connection as a series
-// of GELF chunked messages.  The format is documented at
-// http://docs.graylog.org/en/2.1/pages/gelf.html as:
-//
-//     2-byte magic (0x1e 0x0f), 8 byte id, 1 byte sequence id, 1 byte
-//     total, chunk-data
-func (w *Writer) writeChunked(zBytes []byte) (err error) {
-	b := make([]byte, 0, ChunkSize)
-	buf := bytes.NewBuffer(b)
-	nChunksI := numChunks(zBytes)
-	if nChunksI > 128 {
-		return fmt.Errorf("msg too large, would need %d chunks", nChunksI)
-	}
-	nChunks := uint8(nChunksI)
-	// use urandom to get a unique message id
-	msgId := make([]byte, 8)
-	n, err := io.ReadFull(rand.Reader, msgId)
-	if err != nil || n != 8 {
-		return fmt.Errorf("rand.Reader: %d/%s", n, err)
-	}
-
-	bytesLeft := len(zBytes)
-	for i := uint8(0); i < nChunks; i++ {
-		buf.Reset()
-		// manually write header.  Don't care about
-		// host/network byte order, because the spec only
-		// deals in individual bytes.
-		buf.Write(magicChunked) //magic
-		buf.Write(msgId)
-		buf.WriteByte(i)
-		buf.WriteByte(nChunks)
-		// slice out our chunk from zBytes
-		chunkLen := chunkedDataLen
-		if chunkLen > bytesLeft {
-			chunkLen = bytesLeft
-		}
-		off := int(i) * chunkedDataLen
-		chunk := zBytes[off : off+chunkLen]
-		buf.Write(chunk)
-
-		// write this chunk, and make sure the write was good
-		n, err := w.conn.Write(buf.Bytes())
-		if err != nil {
-			return fmt.Errorf("Write (chunk %d/%d): %s", i,
-				nChunks, err)
-		}
-		if n != len(buf.Bytes()) {
-			return fmt.Errorf("Write len: (chunk %d/%d) (%d/%d)",
-				i, nChunks, n, len(buf.Bytes()))
-		}
-
-		bytesLeft -= chunkLen
-	}
-
-	if bytesLeft != 0 {
-		return fmt.Errorf("error: %d bytes left after sending", bytesLeft)
-	}
-	return nil
-}
-
-// 1k bytes buffer by default
-var bufPool = sync.Pool{
-	New: func() interface{} {
-		return bytes.NewBuffer(make([]byte, 0, 1024))
-	},
-}
-
-func newBuffer() *bytes.Buffer {
-	b := bufPool.Get().(*bytes.Buffer)
-	if b != nil {
-		b.Reset()
-		return b
-	}
-	return bytes.NewBuffer(nil)
-}
-
-// WriteMessage sends the specified message to the GELF server
-// specified in the call to New().  It assumes all the fields are
-// filled out appropriately.  In general, clients will want to use
-// Write, rather than WriteMessage.
-func (w *Writer) WriteMessage(m *Message) (err error) {
-	mBuf := newBuffer()
-	defer bufPool.Put(mBuf)
-	if err = m.MarshalJSONBuf(mBuf); err != nil {
-		return err
-	}
-	mBytes := mBuf.Bytes()
-
-	var (
-		zBuf   *bytes.Buffer
-		zBytes []byte
-	)
-
-	var zw io.WriteCloser
-	switch w.CompressionType {
-	case CompressGzip:
-		zBuf = newBuffer()
-		defer bufPool.Put(zBuf)
-		zw, err = gzip.NewWriterLevel(zBuf, w.CompressionLevel)
-	case CompressZlib:
-		zBuf = newBuffer()
-		defer bufPool.Put(zBuf)
-		zw, err = zlib.NewWriterLevel(zBuf, w.CompressionLevel)
-	case CompressNone:
-		zBytes = mBytes
-	default:
-		panic(fmt.Sprintf("unknown compression type %d",
-			w.CompressionType))
-	}
-	if zw != nil {
-		if err != nil {
-			return
-		}
-		if _, err = zw.Write(mBytes); err != nil {
-			zw.Close()
-			return
-		}
-		zw.Close()
-		zBytes = zBuf.Bytes()
-	}
-
-	if numChunks(zBytes) > 1 {
-		return w.writeChunked(zBytes)
-	}
-	n, err := w.conn.Write(zBytes)
-	if err != nil {
-		return
-	}
-	if n != len(zBytes) {
-		return fmt.Errorf("bad write (%d/%d)", n, len(zBytes))
-	}
-
-	return nil
+type GelfWriter struct {
+	addr     string
+	conn     net.Conn
+	hostname string
+	Facility string // defaults to current process name
+	proto    string
 }
 
 // Close connection and interrupt blocked Read or Write operations
-func (w *Writer) Close() error {
+func (w *GelfWriter) Close() error {
 	return w.conn.Close()
 }
-
-/*
-func (w *Writer) Alert(m string) (err error)
-func (w *Writer) Close() error
-func (w *Writer) Crit(m string) (err error)
-func (w *Writer) Debug(m string) (err error)
-func (w *Writer) Emerg(m string) (err error)
-func (w *Writer) Err(m string) (err error)
-func (w *Writer) Info(m string) (err error)
-func (w *Writer) Notice(m string) (err error)
-func (w *Writer) Warning(m string) (err error)
-*/
-
-// getCaller returns the filename and the line info of a function
-// further down in the call stack.  Passing 0 in as callDepth would
-// return info on the function calling getCallerIgnoringLog, 1 the
-// parent function, and so on.  Any suffixes passed to getCaller are
-// path fragments like "/pkg/log/log.go", and functions in the call
-// stack from that file are ignored.
-func getCaller(callDepth int, suffixesToIgnore ...string) (file string, line int) {
-	// bump by 1 to ignore the getCaller (this) stackframe
-	callDepth++
-outer:
-	for {
-		var ok bool
-		_, file, line, ok = runtime.Caller(callDepth)
-		if !ok {
-			file = "???"
-			line = 0
-			break
-		}
-
-		for _, s := range suffixesToIgnore {
-			if strings.HasSuffix(file, s) {
-				callDepth++
-				continue outer
-			}
-		}
-		break
-	}
-	return
-}
-
-func getCallerIgnoringLogMulti(callDepth int) (string, int) {
-	// the +1 is to ignore this (getCallerIgnoringLogMulti) frame
-	return getCaller(callDepth+1, "/pkg/log/log.go", "/pkg/io/multi.go")
-}
-
-// Write encodes the given string in a GELF message and sends it to
-// the server specified in New().
-func (w *Writer) Write(p []byte) (n int, err error) {
-
-	// 1 for the function that called us.
-	file, line := getCallerIgnoringLogMulti(1)
-
-	// remove trailing and leading whitespace
-	p = bytes.TrimSpace(p)
-
-	// If there are newlines in the message, use the first line
-	// for the short message and set the full message to the
-	// original input.  If the input has no newlines, stick the
-	// whole thing in Short.
-	short := p
-	full := []byte("")
-	if i := bytes.IndexRune(p, '\n'); i > 0 {
-		short = p[:i]
-		full = p
-	}
-
-	m := Message{
-		Version:  "1.1",
-		Host:     w.hostname,
-		Short:    string(short),
-		Full:     string(full),
-		TimeUnix: float64(time.Now().Unix()),
-		Level:    6, // info
-		Facility: w.Facility,
-		Extra: map[string]interface{}{
-			"_file": file,
-			"_line": line,
-		},
-	}
-
-	if err = w.WriteMessage(&m); err != nil {
-		return 0, err
-	}
-
-	return len(p), nil
-}
-
-func (m *Message) MarshalJSONBuf(buf *bytes.Buffer) error {
-	b, err := json.Marshal(m)
-	if err != nil {
-		return err
-	}
-	// write up until the final }
-	if _, err = buf.Write(b[:len(b)-1]); err != nil {
-		return err
-	}
-	if len(m.Extra) > 0 {
-		eb, err := json.Marshal(m.Extra)
-		if err != nil {
-			return err
-		}
-		// merge serialized message + serialized extra map
-		if err = buf.WriteByte(','); err != nil {
-			return err
-		}
-		// write serialized extra bytes, without enclosing quotes
-		if _, err = buf.Write(eb[1 : len(eb)-1]); err != nil {
-			return err
-		}
-	}
-
-	if len(m.RawExtra) > 0 {
-		if err := buf.WriteByte(','); err != nil {
-			return err
-		}
-
-		// write serialized extra bytes, without enclosing quotes
-		if _, err = buf.Write(m.RawExtra[1 : len(m.RawExtra)-1]); err != nil {
-			return err
-		}
-	}
-
-	// write final closing quotes
-	return buf.WriteByte('}')
-}
-
-func (m *Message) UnmarshalJSON(data []byte) error {
-	i := make(map[string]interface{}, 16)
-	if err := json.Unmarshal(data, &i); err != nil {
-		return err
-	}
-	for k, v := range i {
-		if k[0] == '_' {
-			if m.Extra == nil {
-				m.Extra = make(map[string]interface{}, 1)
-			}
-			m.Extra[k] = v
-			continue
-		}
-		switch k {
-		case "version":
-			m.Version = v.(string)
-		case "host":
-			m.Host = v.(string)
-		case "short_message":
-			m.Short = v.(string)
-		case "full_message":
-			m.Full = v.(string)
-		case "timestamp":
-			m.TimeUnix = v.(float64)
-		case "level":
-			m.Level = int32(v.(float64))
-		case "facility":
-			m.Facility = v.(string)
-		}
-	}
-	return nil
-}
diff --git a/vendor/github.com/Microsoft/hcsshim/container.go b/vendor/github.com/Microsoft/hcsshim/container.go
index b924d39..3354f70 100644
--- a/vendor/github.com/Microsoft/hcsshim/container.go
+++ b/vendor/github.com/Microsoft/hcsshim/container.go
@@ -201,12 +201,18 @@
 
 	if createError == nil || IsPending(createError) {
 		if err := container.registerCallback(); err != nil {
+			// Terminate the container if it still exists. We're okay to ignore a failure here.
+			container.Terminate()
 			return nil, makeContainerError(container, operation, "", err)
 		}
 	}
 
 	err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout)
 	if err != nil {
+		if err == ErrTimeout {
+			// Terminate the container if it still exists. We're okay to ignore a failure here.
+			container.Terminate()
+		}
 		return nil, makeContainerError(container, operation, configuration, err)
 	}
 
diff --git a/vendor/github.com/Microsoft/hcsshim/interface.go b/vendor/github.com/Microsoft/hcsshim/interface.go
index 9fc7852..e21f300 100644
--- a/vendor/github.com/Microsoft/hcsshim/interface.go
+++ b/vendor/github.com/Microsoft/hcsshim/interface.go
@@ -30,11 +30,12 @@
 }
 
 type MappedDir struct {
-	HostPath         string
-	ContainerPath    string
-	ReadOnly         bool
-	BandwidthMaximum uint64
-	IOPSMaximum      uint64
+	HostPath          string
+	ContainerPath     string
+	ReadOnly          bool
+	BandwidthMaximum  uint64
+	IOPSMaximum       uint64
+	CreateInUtilityVM bool
 }
 
 type MappedPipe struct {
diff --git a/vendor/github.com/Microsoft/opengcs/client/config.go b/vendor/github.com/Microsoft/opengcs/client/config.go
index 1211957..5ece88b 100644
--- a/vendor/github.com/Microsoft/opengcs/client/config.go
+++ b/vendor/github.com/Microsoft/opengcs/client/config.go
@@ -93,10 +93,10 @@
 			case "lcow.timeout":
 				var err error
 				if rOpts.TimeoutSeconds, err = strconv.Atoi(opt[1]); err != nil {
-					return rOpts, fmt.Errorf("opengcstimeoutsecs option could not be interpreted as an integer")
+					return rOpts, fmt.Errorf("lcow.timeout option could not be interpreted as an integer")
 				}
 				if rOpts.TimeoutSeconds < 0 {
-					return rOpts, fmt.Errorf("opengcstimeoutsecs option cannot be negative")
+					return rOpts, fmt.Errorf("lcow.timeout option cannot be negative")
 				}
 			}
 		}
@@ -242,7 +242,7 @@
 		configuration.HvRuntime = &hcsshim.HvRuntime{
 			ImagePath:          config.Vhdx,
 			BootSource:         "Vhd",
-			WritableBootSource: true,
+			WritableBootSource: false,
 		}
 	} else {
 		configuration.HvRuntime = &hcsshim.HvRuntime{
diff --git a/vendor/github.com/Microsoft/opengcs/client/createext4vhdx.go b/vendor/github.com/Microsoft/opengcs/client/createext4vhdx.go
index b53ce25..48daaeb 100644
--- a/vendor/github.com/Microsoft/opengcs/client/createext4vhdx.go
+++ b/vendor/github.com/Microsoft/opengcs/client/createext4vhdx.go
@@ -57,6 +57,8 @@
 		return fmt.Errorf("failed to create VHDx %s: %s", destFile, err)
 	}
 
+	defer config.DebugGCS()
+
 	// Attach it to the utility VM, but don't mount it (as there's no filesystem on it)
 	if err := config.HotAddVhd(destFile, "", false, false); err != nil {
 		return fmt.Errorf("opengcs: CreateExt4Vhdx: failed to hot-add %s to utility VM: %s", cacheFile, err)
diff --git a/vendor/github.com/Microsoft/opengcs/client/hotaddvhd.go b/vendor/github.com/Microsoft/opengcs/client/hotaddvhd.go
index daf7c25..ef1e51f 100644
--- a/vendor/github.com/Microsoft/opengcs/client/hotaddvhd.go
+++ b/vendor/github.com/Microsoft/opengcs/client/hotaddvhd.go
@@ -20,6 +20,8 @@
 		return fmt.Errorf("cannot hot-add VHD as no utility VM is in configuration")
 	}
 
+	defer config.DebugGCS()
+
 	modification := &hcsshim.ResourceModificationRequestResponse{
 		Resource: "MappedVirtualDisk",
 		Data: hcsshim.MappedVirtualDisk{
diff --git a/vendor/github.com/Microsoft/opengcs/client/hotremovevhd.go b/vendor/github.com/Microsoft/opengcs/client/hotremovevhd.go
index cf19712..be63189 100644
--- a/vendor/github.com/Microsoft/opengcs/client/hotremovevhd.go
+++ b/vendor/github.com/Microsoft/opengcs/client/hotremovevhd.go
@@ -18,6 +18,8 @@
 		return fmt.Errorf("cannot hot-add VHD as no utility VM is in configuration")
 	}
 
+	defer config.DebugGCS()
+
 	modification := &hcsshim.ResourceModificationRequestResponse{
 		Resource: "MappedVirtualDisk",
 		Data: hcsshim.MappedVirtualDisk{
diff --git a/vendor/github.com/Microsoft/opengcs/client/process.go b/vendor/github.com/Microsoft/opengcs/client/process.go
index 984a95a..958fdb5 100644
--- a/vendor/github.com/Microsoft/opengcs/client/process.go
+++ b/vendor/github.com/Microsoft/opengcs/client/process.go
@@ -3,8 +3,12 @@
 package client
 
 import (
+	"bytes"
 	"fmt"
 	"io"
+	"os"
+	"strings"
+	"time"
 
 	"github.com/Microsoft/hcsshim"
 	"github.com/sirupsen/logrus"
@@ -110,3 +114,44 @@
 	logrus.Debugf("opengcs: runProcess success: %s", commandLine)
 	return process.Process, nil
 }
+
+func debugCommand(s string) string {
+	return fmt.Sprintf(`echo -e 'DEBUG COMMAND: %s\\n--------------\\n';%s;echo -e '\\n\\n';`, s, s)
+}
+
+// DebugGCS extracts logs from the GCS. It's a useful hack for debugging;
+// not necessarily optimal, but it is all that is available to us in RS3.
+func (config *Config) DebugGCS() {
+	if logrus.GetLevel() < logrus.DebugLevel || len(os.Getenv("OPENGCS_DEBUG_ENABLE")) == 0 {
+		return
+	}
+
+	var out bytes.Buffer
+	cmd := os.Getenv("OPENGCS_DEBUG_COMMAND")
+	if cmd == "" {
+		cmd = `sh -c "`
+		cmd += debugCommand("ls -l /tmp")
+		cmd += debugCommand("cat /tmp/gcs.log")
+		cmd += debugCommand("ls -l /tmp/gcs")
+		cmd += debugCommand("ls -l /tmp/gcs/*")
+		cmd += debugCommand("cat /tmp/gcs/*/config.json")
+		cmd += debugCommand("ls -lR /var/run/gcsrunc")
+		cmd += debugCommand("cat /var/run/gcsrunc/log.log")
+		cmd += debugCommand("ps -ef")
+		cmd += `"`
+	}
+	proc, err := config.RunProcess(cmd, nil, &out, nil)
+	defer func() {
+		if proc != nil {
+			proc.Kill()
+			proc.Close()
+		}
+	}()
+	if err != nil {
+		logrus.Debugln("benign failure getting gcs logs: ", err)
+	}
+	if proc != nil {
+		proc.WaitTimeout(time.Duration(int(time.Second) * 30))
+	}
+	logrus.Debugf("GCS Debugging:\n%s\n\nEnd GCS Debugging\n", strings.TrimSpace(out.String()))
+}
diff --git a/vendor/github.com/Microsoft/opengcs/client/tartovhd.go b/vendor/github.com/Microsoft/opengcs/client/tartovhd.go
index 9aa6609..29ee489 100644
--- a/vendor/github.com/Microsoft/opengcs/client/tartovhd.go
+++ b/vendor/github.com/Microsoft/opengcs/client/tartovhd.go
@@ -17,6 +17,8 @@
 		return 0, fmt.Errorf("cannot Tar2Vhd as no utility VM is in configuration")
 	}
 
+	defer config.DebugGCS()
+
 	process, err := config.createUtilsProcess("tar2vhd")
 	if err != nil {
 		return 0, fmt.Errorf("failed to start tar2vhd for %s: %s", targetVHDFile, err)
diff --git a/vendor/github.com/Microsoft/opengcs/client/vhdtotar.go b/vendor/github.com/Microsoft/opengcs/client/vhdtotar.go
index 27225a7..72e9a24 100644
--- a/vendor/github.com/Microsoft/opengcs/client/vhdtotar.go
+++ b/vendor/github.com/Microsoft/opengcs/client/vhdtotar.go
@@ -20,6 +20,8 @@
 		return nil, fmt.Errorf("cannot VhdToTar as no utility VM is in configuration")
 	}
 
+	defer config.DebugGCS()
+
 	vhdHandle, err := os.Open(vhdFile)
 	if err != nil {
 		return nil, fmt.Errorf("opengcs: VhdToTar: failed to open %s: %s", vhdFile, err)
diff --git a/vendor/github.com/Microsoft/opengcs/service/gcsutils/README b/vendor/github.com/Microsoft/opengcs/service/gcsutils/README
new file mode 100644
index 0000000..85e5442
--- /dev/null
+++ b/vendor/github.com/Microsoft/opengcs/service/gcsutils/README
@@ -0,0 +1,4 @@
+1. This program only runs on Linux, so first copy the files over to a Linux machine.
+2. Install Go and then run make get-deps && make. This will set the $GOPATH for you and build the binaries.
+3. vhd_to_tar and tar_to_vhd are the standalone executables that read/write to stdin/out and do the tar <-> vhd conversion.
+   tar2vhd_server is the service VM server that takes client requests over hvsock.
diff --git a/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/defs.go b/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/defs.go
new file mode 100644
index 0000000..f1f2c04
--- /dev/null
+++ b/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/defs.go
@@ -0,0 +1,109 @@
+package remotefs
+
+import (
+	"errors"
+	"os"
+	"time"
+)
+
+// RemotefsCmd is the name of the remotefs meta command
+const RemotefsCmd = "remotefs"
+
+// Name of the commands when called from the cli context (remotefs <CMD> ...)
+const (
+	StatCmd           = "stat"
+	LstatCmd          = "lstat"
+	ReadlinkCmd       = "readlink"
+	MkdirCmd          = "mkdir"
+	MkdirAllCmd       = "mkdirall"
+	RemoveCmd         = "remove"
+	RemoveAllCmd      = "removeall"
+	LinkCmd           = "link"
+	SymlinkCmd        = "symlink"
+	LchmodCmd         = "lchmod"
+	LchownCmd         = "lchown"
+	MknodCmd          = "mknod"
+	MkfifoCmd         = "mkfifo"
+	OpenFileCmd       = "openfile"
+	ReadFileCmd       = "readfile"
+	WriteFileCmd      = "writefile"
+	ReadDirCmd        = "readdir"
+	ResolvePathCmd    = "resolvepath"
+	ExtractArchiveCmd = "extractarchive"
+	ArchivePathCmd    = "archivepath"
+)
+
+// ErrInvalid is returned if the parameters are invalid
+var ErrInvalid = errors.New("invalid arguments")
+
+// ErrUnknown is returned for an unknown remotefs command
+var ErrUnknown = errors.New("unkown command")
+
+// ExportedError is the serialized version of a Go error.
+// It also provides a trivial implementation of the error interface.
+type ExportedError struct {
+	ErrString string
+	ErrNum    int `json:",omitempty"`
+}
+
+// Error returns an error string
+func (ee *ExportedError) Error() string {
+	return ee.ErrString
+}
+
+// FileInfo is the stat struct returned by the remotefs system. It
+// fulfills the os.FileInfo interface.
+type FileInfo struct {
+	NameVar    string
+	SizeVar    int64
+	ModeVar    os.FileMode
+	ModTimeVar int64 // Serialization of time.Time breaks in travis, so use an int
+	IsDirVar   bool
+}
+
+var _ os.FileInfo = &FileInfo{}
+
+// Name returns the filename from a FileInfo structure
+func (f *FileInfo) Name() string { return f.NameVar }
+
+// Size returns the size from a FileInfo structure
+func (f *FileInfo) Size() int64 { return f.SizeVar }
+
+// Mode returns the mode from a FileInfo structure
+func (f *FileInfo) Mode() os.FileMode { return f.ModeVar }
+
+// ModTime returns the modification time from a FileInfo structure
+func (f *FileInfo) ModTime() time.Time { return time.Unix(0, f.ModTimeVar) }
+
+// IsDir returns the is-directory indicator from a FileInfo structure
+func (f *FileInfo) IsDir() bool { return f.IsDirVar }
+
+// Sys provides an interface to a FileInfo structure
+func (f *FileInfo) Sys() interface{} { return nil }
+
+// FileHeader is a header for remote *os.File operations for remotefs.OpenFile
+type FileHeader struct {
+	Cmd  uint32
+	Size uint64
+}
+
+const (
+	// Read request command.
+	Read uint32 = iota
+	// Write request command.
+	Write
+	// Seek request command.
+	Seek
+	// Close request command.
+	Close
+	// CmdOK is a response meaning request succeeded.
+	CmdOK
+	// CmdFailed is a response meaning request failed.
+	CmdFailed
+)
+
+// SeekHeader is header for the Seek operation for remotefs.OpenFile
+type SeekHeader struct {
+	Offset int64
+	Whence int32
+}
diff --git a/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/remotefs.go b/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/remotefs.go
new file mode 100644
index 0000000..2d4a9f2
--- /dev/null
+++ b/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/remotefs.go
@@ -0,0 +1,546 @@
+// +build !windows
+
+package remotefs
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"io"
+	"os"
+	"path/filepath"
+	"strconv"
+
+	"github.com/docker/docker/pkg/archive"
+	"github.com/docker/docker/pkg/symlink"
+	"golang.org/x/sys/unix"
+)
+
+// Func is the function definition for a generic remote fs function
+// The input to the function is any serialized structs / data from in and the string slice
+// from args. The output of the function will be serialized and written to out.
+type Func func(stdin io.Reader, stdout io.Writer, args []string) error
+
+// Commands provide a string -> remotefs function mapping.
+// This is useful for commandline programs that will receive a string
+// as the function to execute.
+var Commands = map[string]Func{
+	StatCmd:           Stat,
+	LstatCmd:          Lstat,
+	ReadlinkCmd:       Readlink,
+	MkdirCmd:          Mkdir,
+	MkdirAllCmd:       MkdirAll,
+	RemoveCmd:         Remove,
+	RemoveAllCmd:      RemoveAll,
+	LinkCmd:           Link,
+	SymlinkCmd:        Symlink,
+	LchmodCmd:         Lchmod,
+	LchownCmd:         Lchown,
+	MknodCmd:          Mknod,
+	MkfifoCmd:         Mkfifo,
+	OpenFileCmd:       OpenFile,
+	ReadFileCmd:       ReadFile,
+	WriteFileCmd:      WriteFile,
+	ReadDirCmd:        ReadDir,
+	ResolvePathCmd:    ResolvePath,
+	ExtractArchiveCmd: ExtractArchive,
+	ArchivePathCmd:    ArchivePath,
+}
+
+// Stat functions like os.Stat.
+// Args:
+// - args[0] is the path
+// Out:
+// - out = FileInfo object
+func Stat(in io.Reader, out io.Writer, args []string) error {
+	return stat(in, out, args, os.Stat)
+}
+
+// Lstat functions like os.Lstat.
+// Args:
+// - args[0] is the path
+// Out:
+// - out = FileInfo object
+func Lstat(in io.Reader, out io.Writer, args []string) error {
+	return stat(in, out, args, os.Lstat)
+}
+
+func stat(in io.Reader, out io.Writer, args []string, statfunc func(string) (os.FileInfo, error)) error {
+	if len(args) < 1 {
+		return ErrInvalid
+	}
+
+	fi, err := statfunc(args[0])
+	if err != nil {
+		return err
+	}
+
+	info := FileInfo{
+		NameVar:    fi.Name(),
+		SizeVar:    fi.Size(),
+		ModeVar:    fi.Mode(),
+		ModTimeVar: fi.ModTime().UnixNano(),
+		IsDirVar:   fi.IsDir(),
+	}
+
+	buf, err := json.Marshal(info)
+	if err != nil {
+		return err
+	}
+
+	if _, err := out.Write(buf); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Readlink works like os.Readlink
+// In:
+//  - args[0] is path
+// Out:
+//  - Write link result to out
+func Readlink(in io.Reader, out io.Writer, args []string) error {
+	if len(args) < 1 {
+		return ErrInvalid
+	}
+
+	l, err := os.Readlink(args[0])
+	if err != nil {
+		return err
+	}
+
+	if _, err := out.Write([]byte(l)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Mkdir works like os.Mkdir
+// Args:
+// - args[0] is the path
+// - args[1] is the permissions in octal (like 0755)
+func Mkdir(in io.Reader, out io.Writer, args []string) error {
+	return mkdir(in, out, args, os.Mkdir)
+}
+
+// MkdirAll works like os.MkdirAll.
+// Args:
+// - args[0] is the path
+// - args[1] is the permissions in octal (like 0755)
+func MkdirAll(in io.Reader, out io.Writer, args []string) error {
+	return mkdir(in, out, args, os.MkdirAll)
+}
+
+func mkdir(in io.Reader, out io.Writer, args []string, mkdirFunc func(string, os.FileMode) error) error {
+	if len(args) < 2 {
+		return ErrInvalid
+	}
+
+	perm, err := strconv.ParseUint(args[1], 8, 32)
+	if err != nil {
+		return err
+	}
+	return mkdirFunc(args[0], os.FileMode(perm))
+}
+
+// Remove works like os.Remove
+// Args:
+//	- args[0] is the path
+func Remove(in io.Reader, out io.Writer, args []string) error {
+	return remove(in, out, args, os.Remove)
+}
+
+// RemoveAll works like os.RemoveAll
+// Args:
+//  - args[0] is the path
+func RemoveAll(in io.Reader, out io.Writer, args []string) error {
+	return remove(in, out, args, os.RemoveAll)
+}
+
+func remove(in io.Reader, out io.Writer, args []string, removefunc func(string) error) error {
+	if len(args) < 1 {
+		return ErrInvalid
+	}
+	return removefunc(args[0])
+}
+
+// Link works like os.Link
+// Args:
+//  - args[0] = old path name (link source)
+//  - args[1] = new path name (link dest)
+func Link(in io.Reader, out io.Writer, args []string) error {
+	return link(in, out, args, os.Link)
+}
+
+// Symlink works like os.Symlink
+// Args:
+//  - args[0] = old path name (link source)
+//  - args[1] = new path name (link dest)
+func Symlink(in io.Reader, out io.Writer, args []string) error {
+	return link(in, out, args, os.Symlink)
+}
+
+func link(in io.Reader, out io.Writer, args []string, linkfunc func(string, string) error) error {
+	if len(args) < 2 {
+		return ErrInvalid
+	}
+	return linkfunc(args[0], args[1])
+}
+
+// Lchmod changes permission of the given file without following symlinks
+// Args:
+//  - args[0] = path
+//  - args[1] = permission mode in octal (like 0755)
+func Lchmod(in io.Reader, out io.Writer, args []string) error {
+	if len(args) < 2 {
+		return ErrInvalid
+	}
+
+	perm, err := strconv.ParseUint(args[1], 8, 32)
+	if err != nil {
+		return err
+	}
+
+	path := args[0]
+	if !filepath.IsAbs(path) {
+		path, err = filepath.Abs(path)
+		if err != nil {
+			return err
+		}
+	}
+	return unix.Fchmodat(0, path, uint32(perm), unix.AT_SYMLINK_NOFOLLOW)
+}
+
+// Lchown works like os.Lchown
+// Args:
+//  - args[0] = path
+//  - args[1] = uid in base 10
+//  - args[2] = gid in base 10
+func Lchown(in io.Reader, out io.Writer, args []string) error {
+	if len(args) < 3 {
+		return ErrInvalid
+	}
+
+	uid, err := strconv.ParseInt(args[1], 10, 64)
+	if err != nil {
+		return err
+	}
+
+	gid, err := strconv.ParseInt(args[2], 10, 64)
+	if err != nil {
+		return err
+	}
+	return os.Lchown(args[0], int(uid), int(gid))
+}
+
+// Mknod works like syscall.Mknod
+// Args:
+//  - args[0] = path
+//  - args[1] = permission mode in octal (like 0755)
+//  - args[2] = major device number in base 10
+//  - args[3] = minor device number in base 10
+func Mknod(in io.Reader, out io.Writer, args []string) error {
+	if len(args) < 4 {
+		return ErrInvalid
+	}
+
+	perm, err := strconv.ParseUint(args[1], 8, 32)
+	if err != nil {
+		return err
+	}
+
+	major, err := strconv.ParseInt(args[2], 10, 32)
+	if err != nil {
+		return err
+	}
+
+	minor, err := strconv.ParseInt(args[3], 10, 32)
+	if err != nil {
+		return err
+	}
+
+	dev := unix.Mkdev(uint32(major), uint32(minor))
+	return unix.Mknod(args[0], uint32(perm), int(dev))
+}
+
+// Mkfifo creates a FIFO special file with the given path name and permissions
+// Args:
+// 	- args[0] = path
+//  - args[1] = permission mode in octal (like 0755)
+func Mkfifo(in io.Reader, out io.Writer, args []string) error {
+	if len(args) < 2 {
+		return ErrInvalid
+	}
+
+	perm, err := strconv.ParseUint(args[1], 8, 32)
+	if err != nil {
+		return err
+	}
+	return unix.Mkfifo(args[0], uint32(perm))
+}
+
+// OpenFile works like os.OpenFile. To manage the file pointer state,
+// this function acts as a single file "file server" with Read/Write/Close
+// being serialized control codes from in.
+// Args:
+//  - args[0] = path
+//  - args[1] = flag in base 10
+//  - args[2] = permission mode in octal (like 0755)
+func OpenFile(in io.Reader, out io.Writer, args []string) (err error) {
+	defer func() {
+		if err != nil {
+			// error code will be serialized by the caller, so don't write it here
+			WriteFileHeader(out, &FileHeader{Cmd: CmdFailed}, nil)
+		}
+	}()
+
+	if len(args) < 3 {
+		return ErrInvalid
+	}
+
+	flag, err := strconv.ParseInt(args[1], 10, 32)
+	if err != nil {
+		return err
+	}
+
+	perm, err := strconv.ParseUint(args[2], 8, 32)
+	if err != nil {
+		return err
+	}
+
+	f, err := os.OpenFile(args[0], int(flag), os.FileMode(perm))
+	if err != nil {
+		return err
+	}
+
+	// Signal the client that OpenFile succeeded
+	if err := WriteFileHeader(out, &FileHeader{Cmd: CmdOK}, nil); err != nil {
+		return err
+	}
+
+	for {
+		hdr, err := ReadFileHeader(in)
+		if err != nil {
+			return err
+		}
+
+		var buf []byte
+		switch hdr.Cmd {
+		case Read:
+			buf = make([]byte, hdr.Size, hdr.Size)
+			n, err := f.Read(buf)
+			if err != nil {
+				return err
+			}
+			buf = buf[:n]
+		case Write:
+			if _, err := io.CopyN(f, in, int64(hdr.Size)); err != nil {
+				return err
+			}
+		case Seek:
+			seekHdr := &SeekHeader{}
+			if err := binary.Read(in, binary.BigEndian, seekHdr); err != nil {
+				return err
+			}
+			res, err := f.Seek(seekHdr.Offset, int(seekHdr.Whence))
+			if err != nil {
+				return err
+			}
+			buffer := &bytes.Buffer{}
+			if err := binary.Write(buffer, binary.BigEndian, res); err != nil {
+				return err
+			}
+			buf = buffer.Bytes()
+		case Close:
+			if err := f.Close(); err != nil {
+				return err
+			}
+		default:
+			return ErrUnknown
+		}
+
+		retHdr := &FileHeader{
+			Cmd:  CmdOK,
+			Size: uint64(len(buf)),
+		}
+		if err := WriteFileHeader(out, retHdr, buf); err != nil {
+			return err
+		}
+
+		if hdr.Cmd == Close {
+			break
+		}
+	}
+	return nil
+}
+
+// ReadFile works like ioutil.ReadFile but instead writes the file to a writer
+// Args:
+//  - args[0] = path
+// Out:
+//  - Write file contents to out
+func ReadFile(in io.Reader, out io.Writer, args []string) error {
+	if len(args) < 1 {
+		return ErrInvalid
+	}
+
+	f, err := os.Open(args[0])
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	if _, err := io.Copy(out, f); err != nil {
+		return nil
+	}
+	return nil
+}
+
+// WriteFile works like ioutil.WriteFile but instead reads the file from a reader
+// Args:
+//  - args[0] = path
+//  - args[1] = permission mode in octal (like 0755)
+//  - input data stream from in
+func WriteFile(in io.Reader, out io.Writer, args []string) error {
+	if len(args) < 2 {
+		return ErrInvalid
+	}
+
+	perm, err := strconv.ParseUint(args[1], 8, 32)
+	if err != nil {
+		return err
+	}
+
+	f, err := os.OpenFile(args[0], os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(perm))
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	if _, err := io.Copy(f, in); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ReadDir works like *os.File.Readdir but instead writes the result to a writer
+// Args:
+//  - args[0] = path
+//  - args[1] = number of directory entries to return. If <= 0, return all entries in directory
+func ReadDir(in io.Reader, out io.Writer, args []string) error {
+	if len(args) < 2 {
+		return ErrInvalid
+	}
+
+	n, err := strconv.ParseInt(args[1], 10, 32)
+	if err != nil {
+		return err
+	}
+
+	f, err := os.Open(args[0])
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	infos, err := f.Readdir(int(n))
+	if err != nil {
+		return err
+	}
+
+	fileInfos := make([]FileInfo, len(infos))
+	for i := range infos {
+		fileInfos[i] = FileInfo{
+			NameVar:    infos[i].Name(),
+			SizeVar:    infos[i].Size(),
+			ModeVar:    infos[i].Mode(),
+			ModTimeVar: infos[i].ModTime().UnixNano(),
+			IsDirVar:   infos[i].IsDir(),
+		}
+	}
+
+	buf, err := json.Marshal(fileInfos)
+	if err != nil {
+		return err
+	}
+
+	if _, err := out.Write(buf); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ResolvePath works like docker's symlink.FollowSymlinkInScope.
+// It takes in a `path` and a `root` and evaluates symlinks in `path`
+// as if they were scoped in `root`. `path` must be a child path of `root`.
+// In other words, `path` must have `root` as a prefix.
+// Example:
+// path=/foo/bar -> /baz
+// root=/foo,
+// Expected result = /foo/baz
+//
+// Args:
+// - args[0] is `path`
+// - args[1] is `root`
+// Out:
+// - Write resolved path to stdout
+func ResolvePath(in io.Reader, out io.Writer, args []string) error {
+	if len(args) < 2 {
+		return ErrInvalid
+	}
+	res, err := symlink.FollowSymlinkInScope(args[0], args[1])
+	if err != nil {
+		return err
+	}
+	if _, err = out.Write([]byte(res)); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ExtractArchive extracts the archive read from in.
+// Args:
+// - in = size of json | json of archive.TarOptions | input tar stream
+// - args[0] = extract directory name
+func ExtractArchive(in io.Reader, out io.Writer, args []string) error {
+	if len(args) < 1 {
+		return ErrInvalid
+	}
+
+	opts, err := ReadTarOptions(in)
+	if err != nil {
+		return err
+	}
+
+	if err := archive.Untar(in, args[0], opts); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ArchivePath archives the given directory and writes it to out.
+// Args:
+// - in = size of json | json of archive.TarOptions
+// - args[0] = source directory name
+// Out:
+// - out = tar file of the archive
+func ArchivePath(in io.Reader, out io.Writer, args []string) error {
+	if len(args) < 1 {
+		return ErrInvalid
+	}
+
+	opts, err := ReadTarOptions(in)
+	if err != nil {
+		return err
+	}
+
+	r, err := archive.TarWithOptions(args[0], opts)
+	if err != nil {
+		return err
+	}
+
+	if _, err := io.Copy(out, r); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/utils.go b/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/utils.go
new file mode 100644
index 0000000..a12827c
--- /dev/null
+++ b/vendor/github.com/Microsoft/opengcs/service/gcsutils/remotefs/utils.go
@@ -0,0 +1,168 @@
+package remotefs
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"os"
+	"syscall"
+
+	"github.com/docker/docker/pkg/archive"
+)
+
+// ReadError is a utility function that reads a serialized error from the given reader
+// and deserializes it.
+func ReadError(in io.Reader) (*ExportedError, error) {
+	b, err := ioutil.ReadAll(in)
+	if err != nil {
+		return nil, err
+	}
+
+	// No error
+	if len(b) == 0 {
+		return nil, nil
+	}
+
+	var exportedErr ExportedError
+	if err := json.Unmarshal(b, &exportedErr); err != nil {
+		return nil, err
+	}
+
+	return &exportedErr, nil
+}
+
+// ExportedToError will convert a ExportedError to an error. It will try to match
+// the error to any existing known error like os.ErrNotExist. Otherwise, it will just
+// return an implementation of the error interface.
+func ExportedToError(ee *ExportedError) error {
+	if ee.Error() == os.ErrNotExist.Error() {
+		return os.ErrNotExist
+	} else if ee.Error() == os.ErrExist.Error() {
+		return os.ErrExist
+	} else if ee.Error() == os.ErrPermission.Error() {
+		return os.ErrPermission
+	}
+	return ee
+}
+
+// WriteError is a utility function that serializes the error
+// and writes it to the output writer.
+func WriteError(err error, out io.Writer) error {
+	if err == nil {
+		return nil
+	}
+	err = fixOSError(err)
+
+	var errno int
+	switch typedError := err.(type) {
+	case *os.PathError:
+		if se, ok := typedError.Err.(syscall.Errno); ok {
+			errno = int(se)
+		}
+	case *os.LinkError:
+		if se, ok := typedError.Err.(syscall.Errno); ok {
+			errno = int(se)
+		}
+	case *os.SyscallError:
+		if se, ok := typedError.Err.(syscall.Errno); ok {
+			errno = int(se)
+		}
+	}
+
+	exportedError := &ExportedError{
+		ErrString: err.Error(),
+		ErrNum:    errno,
+	}
+
+	b, err1 := json.Marshal(exportedError)
+	if err1 != nil {
+		return err1
+	}
+
+	_, err1 = out.Write(b)
+	if err1 != nil {
+		return err1
+	}
+	return nil
+}
+
+// fixOSError converts possible platform dependent error into the portable errors in the
+// Go os package if possible.
+func fixOSError(err error) error {
+	// The os.IsExist, os.IsNotExist, and os.IsPermission functions are platform
+	// dependent, so sending the raw error might break those functions on a different OS.
+	// Go defines portable errors for these.
+	if os.IsExist(err) {
+		return os.ErrExist
+	} else if os.IsNotExist(err) {
+		return os.ErrNotExist
+	} else if os.IsPermission(err) {
+		return os.ErrPermission
+	}
+	return err
+}
+
+// ReadTarOptions reads from the specified reader and deserializes an archive.TarOptions struct.
+func ReadTarOptions(r io.Reader) (*archive.TarOptions, error) {
+	var size uint64
+	if err := binary.Read(r, binary.BigEndian, &size); err != nil {
+		return nil, err
+	}
+
+	rawJSON := make([]byte, size)
+	if _, err := io.ReadFull(r, rawJSON); err != nil {
+		return nil, err
+	}
+
+	var opts archive.TarOptions
+	if err := json.Unmarshal(rawJSON, &opts); err != nil {
+		return nil, err
+	}
+	return &opts, nil
+}
+
+// WriteTarOptions serializes a archive.TarOptions struct and writes it to the writer.
+func WriteTarOptions(w io.Writer, opts *archive.TarOptions) error {
+	optsBuf, err := json.Marshal(opts)
+	if err != nil {
+		return err
+	}
+
+	optsSize := uint64(len(optsBuf))
+	optsSizeBuf := &bytes.Buffer{}
+	if err := binary.Write(optsSizeBuf, binary.BigEndian, optsSize); err != nil {
+		return err
+	}
+
+	if _, err := optsSizeBuf.WriteTo(w); err != nil {
+		return err
+	}
+
+	if _, err := w.Write(optsBuf); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ReadFileHeader reads from r and returns a deserialized FileHeader
+func ReadFileHeader(r io.Reader) (*FileHeader, error) {
+	hdr := &FileHeader{}
+	if err := binary.Read(r, binary.BigEndian, hdr); err != nil {
+		return nil, err
+	}
+	return hdr, nil
+}
+
+// WriteFileHeader serializes a FileHeader and writes it to w, along with any extra data
+func WriteFileHeader(w io.Writer, hdr *FileHeader, extraData []byte) error {
+	if err := binary.Write(w, binary.BigEndian, hdr); err != nil {
+		return err
+	}
+	if _, err := w.Write(extraData); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/cgroups/LICENSE b/vendor/github.com/containerd/cgroups/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containerd/cgroups/README.md b/vendor/github.com/containerd/cgroups/README.md
new file mode 100644
index 0000000..69e932a
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/README.md
@@ -0,0 +1,112 @@
+# cgroups
+
+[![Build Status](https://travis-ci.org/containerd/cgroups.svg?branch=master)](https://travis-ci.org/containerd/cgroups)
+
+[![codecov](https://codecov.io/gh/containerd/cgroups/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/cgroups)
+
+Go package for creating, managing, inspecting, and destroying cgroups.
+The resources format for settings on the cgroup uses the OCI runtime-spec found
+[here](https://github.com/opencontainers/runtime-spec).
+
+## Examples
+
+### Create a new cgroup
+
+This creates a new cgroup using a static path for all subsystems under `/test`.
+
+* /sys/fs/cgroup/cpu/test
+* /sys/fs/cgroup/memory/test
+* etc....
+
+It uses a single hierarchy and specifies cpu shares as a resource constraint and
+uses the v1 implementation of cgroups.
+
+
+```go
+shares := uint64(100)
+control, err := cgroups.New(cgroups.V1, cgroups.StaticPath("/test"), &specs.LinuxResources{
+    CPU: &specs.CPU{
+        Shares: &shares,
+    },
+})
+defer control.Delete()
+```
+
+### Create with systemd slice support
+
+
+```go
+control, err := cgroups.New(cgroups.Systemd, cgroups.Slice("system.slice", "runc-test"), &specs.LinuxResources{
+    CPU: &specs.CPU{
+        Shares: &shares,
+    },
+})
+
+```
+
+### Load an existing cgroup
+
+```go
+control, err = cgroups.Load(cgroups.V1, cgroups.StaticPath("/test"))
+```
+
+### Add a process to the cgroup
+
+```go
+if err := control.Add(cgroups.Process{Pid:1234}); err != nil {
+}
+```
+
+### Update the cgroup
+
+To update the resources applied in the cgroup
+
+```go
+shares = uint64(200)
+if err := control.Update(&specs.LinuxResources{
+    CPU: &specs.CPU{
+        Shares: &shares,
+    },
+}); err != nil {
+}
+```
+
+### Freeze and Thaw the cgroup
+
+```go
+if err := control.Freeze(); err != nil {
+}
+if err := control.Thaw(); err != nil {
+}
+```
+
+### List all processes in the cgroup or recursively
+
+```go
+processes, err := control.Processes(cgroups.Devices, recursive)
+```
+
+### Get Stats on the cgroup
+
+```go
+stats, err := control.Stat()
+```
+
+By adding `cgroups.IgnoreNotExist` all non-existent files will be ignored, e.g. swap memory stats without swap enabled
+```go
+stats, err := control.Stat(cgroups.IgnoreNotExist)
+```
+
+### Move process across cgroups
+
+This allows you to take processes from one cgroup and move them to another.
+
+```go
+err := control.MoveTo(destination)
+```
+
+### Create subcgroup
+
+```go
+subCgroup, err := control.New("child", resources)
+```
diff --git a/vendor/github.com/containerd/cgroups/blkio.go b/vendor/github.com/containerd/cgroups/blkio.go
new file mode 100644
index 0000000..078d12b
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/blkio.go
@@ -0,0 +1,323 @@
+package cgroups
+
+import (
+	"bufio"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func NewBlkio(root string) *blkioController {
+	return &blkioController{
+		root: filepath.Join(root, string(Blkio)),
+	}
+}
+
+type blkioController struct {
+	root string
+}
+
+func (b *blkioController) Name() Name {
+	return Blkio
+}
+
+func (b *blkioController) Path(path string) string {
+	return filepath.Join(b.root, path)
+}
+
+func (b *blkioController) Create(path string, resources *specs.LinuxResources) error {
+	if err := os.MkdirAll(b.Path(path), defaultDirPerm); err != nil {
+		return err
+	}
+	if resources.BlockIO == nil {
+		return nil
+	}
+	for _, t := range createBlkioSettings(resources.BlockIO) {
+		if t.value != nil {
+			if err := ioutil.WriteFile(
+				filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", t.name)),
+				t.format(t.value),
+				defaultFilePerm,
+			); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (b *blkioController) Update(path string, resources *specs.LinuxResources) error {
+	return b.Create(path, resources)
+}
+
+func (b *blkioController) Stat(path string, stats *Metrics) error {
+	stats.Blkio = &BlkIOStat{}
+	settings := []blkioStatSettings{
+		{
+			name:  "throttle.io_serviced",
+			entry: &stats.Blkio.IoServicedRecursive,
+		},
+		{
+			name:  "throttle.io_service_bytes",
+			entry: &stats.Blkio.IoServiceBytesRecursive,
+		},
+	}
+	// Try to read CFQ stats available on all CFQ enabled kernels first
+	if _, err := os.Lstat(filepath.Join(b.Path(path), fmt.Sprintf("blkio.io_serviced_recursive"))); err == nil {
+		settings = append(settings,
+			blkioStatSettings{
+				name:  "sectors_recursive",
+				entry: &stats.Blkio.SectorsRecursive,
+			},
+			blkioStatSettings{
+				name:  "io_service_bytes_recursive",
+				entry: &stats.Blkio.IoServiceBytesRecursive,
+			},
+			blkioStatSettings{
+				name:  "io_serviced_recursive",
+				entry: &stats.Blkio.IoServicedRecursive,
+			},
+			blkioStatSettings{
+				name:  "io_queued_recursive",
+				entry: &stats.Blkio.IoQueuedRecursive,
+			},
+			blkioStatSettings{
+				name:  "io_service_time_recursive",
+				entry: &stats.Blkio.IoServiceTimeRecursive,
+			},
+			blkioStatSettings{
+				name:  "io_wait_time_recursive",
+				entry: &stats.Blkio.IoWaitTimeRecursive,
+			},
+			blkioStatSettings{
+				name:  "io_merged_recursive",
+				entry: &stats.Blkio.IoMergedRecursive,
+			},
+			blkioStatSettings{
+				name:  "time_recursive",
+				entry: &stats.Blkio.IoTimeRecursive,
+			},
+		)
+	}
+
+	devices, err := getDevices("/dev")
+	if err != nil {
+		return err
+	}
+
+	for _, t := range settings {
+		if err := b.readEntry(devices, path, t.name, t.entry); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (b *blkioController) readEntry(devices map[deviceKey]string, path, name string, entry *[]*BlkIOEntry) error {
+	f, err := os.Open(filepath.Join(b.Path(path), fmt.Sprintf("blkio.%s", name)))
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	sc := bufio.NewScanner(f)
+	for sc.Scan() {
+		if err := sc.Err(); err != nil {
+			return err
+		}
+		// format: dev type amount
+		fields := strings.FieldsFunc(sc.Text(), splitBlkIOStatLine)
+		if len(fields) < 3 {
+			if len(fields) == 2 && fields[0] == "Total" {
+				// skip total line
+				continue
+			} else {
+				return fmt.Errorf("Invalid line found while parsing %s: %s", path, sc.Text())
+			}
+		}
+		major, err := strconv.ParseUint(fields[0], 10, 64)
+		if err != nil {
+			return err
+		}
+		minor, err := strconv.ParseUint(fields[1], 10, 64)
+		if err != nil {
+			return err
+		}
+		op := ""
+		valueField := 2
+		if len(fields) == 4 {
+			op = fields[2]
+			valueField = 3
+		}
+		v, err := strconv.ParseUint(fields[valueField], 10, 64)
+		if err != nil {
+			return err
+		}
+		*entry = append(*entry, &BlkIOEntry{
+			Device: devices[deviceKey{major, minor}],
+			Major:  major,
+			Minor:  minor,
+			Op:     op,
+			Value:  v,
+		})
+	}
+	return nil
+}
+
+func createBlkioSettings(blkio *specs.LinuxBlockIO) []blkioSettings {
+	settings := []blkioSettings{
+		{
+			name:   "weight",
+			value:  blkio.Weight,
+			format: uintf,
+		},
+		{
+			name:   "leaf_weight",
+			value:  blkio.LeafWeight,
+			format: uintf,
+		},
+	}
+	for _, wd := range blkio.WeightDevice {
+		settings = append(settings,
+			blkioSettings{
+				name:   "weight_device",
+				value:  wd,
+				format: weightdev,
+			},
+			blkioSettings{
+				name:   "leaf_weight_device",
+				value:  wd,
+				format: weightleafdev,
+			})
+	}
+	for _, t := range []struct {
+		name string
+		list []specs.LinuxThrottleDevice
+	}{
+		{
+			name: "throttle.read_bps_device",
+			list: blkio.ThrottleReadBpsDevice,
+		},
+		{
+			name: "throttle.read_iops_device",
+			list: blkio.ThrottleReadIOPSDevice,
+		},
+		{
+			name: "throttle.write_bps_device",
+			list: blkio.ThrottleWriteBpsDevice,
+		},
+		{
+			name: "throttle.write_iops_device",
+			list: blkio.ThrottleWriteIOPSDevice,
+		},
+	} {
+		for _, td := range t.list {
+			settings = append(settings, blkioSettings{
+				name:   t.name,
+				value:  td,
+				format: throttleddev,
+			})
+		}
+	}
+	return settings
+}
+
+type blkioSettings struct {
+	name   string
+	value  interface{}
+	format func(v interface{}) []byte
+}
+
+type blkioStatSettings struct {
+	name  string
+	entry *[]*BlkIOEntry
+}
+
+func uintf(v interface{}) []byte {
+	return []byte(strconv.FormatUint(uint64(*v.(*uint16)), 10))
+}
+
+func weightdev(v interface{}) []byte {
+	wd := v.(specs.LinuxWeightDevice)
+	return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.Weight))
+}
+
+func weightleafdev(v interface{}) []byte {
+	wd := v.(specs.LinuxWeightDevice)
+	return []byte(fmt.Sprintf("%d:%d %d", wd.Major, wd.Minor, wd.LeafWeight))
+}
+
+func throttleddev(v interface{}) []byte {
+	td := v.(specs.LinuxThrottleDevice)
+	return []byte(fmt.Sprintf("%d:%d %d", td.Major, td.Minor, td.Rate))
+}
+
+func splitBlkIOStatLine(r rune) bool {
+	return r == ' ' || r == ':'
+}
+
+type deviceKey struct {
+	major, minor uint64
+}
+
+// getDevices makes a best effort attempt to read all the devices into a map
+// keyed by major and minor number. Since devices may be mapped multiple times,
+// we err on taking the first occurrence.
+func getDevices(path string) (map[deviceKey]string, error) {
+	// TODO(stevvooe): We are ignoring lots of errors. It might be kind of
+	// challenging to debug this if we aren't mapping devices correctly.
+	// Consider logging these errors.
+	devices := map[deviceKey]string{}
+	if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		switch {
+		case fi.IsDir():
+			switch fi.Name() {
+			case "pts", "shm", "fd", "mqueue", ".lxc", ".lxd-mounts":
+				return filepath.SkipDir
+			default:
+				return nil
+			}
+		case fi.Name() == "console":
+			return nil
+		default:
+			if fi.Mode()&os.ModeDevice == 0 {
+				// skip non-devices
+				return nil
+			}
+
+			st, ok := fi.Sys().(*syscall.Stat_t)
+			if !ok {
+				return fmt.Errorf("%s: unable to convert to system stat", p)
+			}
+
+			key := deviceKey{major(st.Rdev), minor(st.Rdev)}
+			if _, ok := devices[key]; ok {
+				return nil // skip it if we have already populated the path.
+			}
+
+			devices[key] = p
+		}
+
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return devices, nil
+}
+
+func major(devNumber uint64) uint64 {
+	return (devNumber >> 8) & 0xfff
+}
+
+func minor(devNumber uint64) uint64 {
+	return (devNumber & 0xff) | ((devNumber >> 12) & 0xfff00)
+}
diff --git a/vendor/github.com/containerd/cgroups/cgroup.go b/vendor/github.com/containerd/cgroups/cgroup.go
new file mode 100644
index 0000000..d9fa75f
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/cgroup.go
@@ -0,0 +1,398 @@
+package cgroups
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"sync"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+// New returns a new control via the cgroup cgroups interface
+func New(hierarchy Hierarchy, path Path, resources *specs.LinuxResources) (Cgroup, error) {
+	subsystems, err := hierarchy()
+	if err != nil {
+		return nil, err
+	}
+	for _, s := range subsystems {
+		if err := initializeSubsystem(s, path, resources); err != nil {
+			return nil, err
+		}
+	}
+	return &cgroup{
+		path:       path,
+		subsystems: subsystems,
+	}, nil
+}
+
+// Load will load an existing cgroup and allow it to be controlled
+func Load(hierarchy Hierarchy, path Path) (Cgroup, error) {
+	subsystems, err := hierarchy()
+	if err != nil {
+		return nil, err
+	}
+	// check that the subsystems still exist
+	for _, s := range pathers(subsystems) {
+		p, err := path(s.Name())
+		if err != nil {
+			if os.IsNotExist(errors.Cause(err)) {
+				return nil, ErrCgroupDeleted
+			}
+			return nil, err
+		}
+		if _, err := os.Lstat(s.Path(p)); err != nil {
+			if os.IsNotExist(err) {
+				return nil, ErrCgroupDeleted
+			}
+			return nil, err
+		}
+	}
+	return &cgroup{
+		path:       path,
+		subsystems: subsystems,
+	}, nil
+}
+
+type cgroup struct {
+	path Path
+
+	subsystems []Subsystem
+	mu         sync.Mutex
+	err        error
+}
+
+// New returns a new sub cgroup
+func (c *cgroup) New(name string, resources *specs.LinuxResources) (Cgroup, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err != nil {
+		return nil, c.err
+	}
+	path := subPath(c.path, name)
+	for _, s := range c.subsystems {
+		if err := initializeSubsystem(s, path, resources); err != nil {
+			return nil, err
+		}
+	}
+	return &cgroup{
+		path:       path,
+		subsystems: c.subsystems,
+	}, nil
+}
+
+// Subsystems returns all the subsystems that are currently being
+// consumed by the group
+func (c *cgroup) Subsystems() []Subsystem {
+	return c.subsystems
+}
+
+// Add moves the provided process into the new cgroup
+func (c *cgroup) Add(process Process) error {
+	if process.Pid <= 0 {
+		return ErrInvalidPid
+	}
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err != nil {
+		return c.err
+	}
+	return c.add(process)
+}
+
+func (c *cgroup) add(process Process) error {
+	for _, s := range pathers(c.subsystems) {
+		p, err := c.path(s.Name())
+		if err != nil {
+			return err
+		}
+		if err := ioutil.WriteFile(
+			filepath.Join(s.Path(p), cgroupProcs),
+			[]byte(strconv.Itoa(process.Pid)),
+			defaultFilePerm,
+		); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Delete will remove the control group from each of the subsystems registered
+func (c *cgroup) Delete() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err != nil {
+		return c.err
+	}
+	var errors []string
+	for _, s := range c.subsystems {
+		if d, ok := s.(deleter); ok {
+			sp, err := c.path(s.Name())
+			if err != nil {
+				return err
+			}
+			if err := d.Delete(sp); err != nil {
+				errors = append(errors, string(s.Name()))
+			}
+			continue
+		}
+		if p, ok := s.(pather); ok {
+			sp, err := c.path(s.Name())
+			if err != nil {
+				return err
+			}
+			path := p.Path(sp)
+			if err := remove(path); err != nil {
+				errors = append(errors, path)
+			}
+		}
+	}
+	if len(errors) > 0 {
+		return fmt.Errorf("cgroups: unable to remove paths %s", strings.Join(errors, ", "))
+	}
+	c.err = ErrCgroupDeleted
+	return nil
+}
+
+// Stat returns the current metrics for the cgroup
+func (c *cgroup) Stat(handlers ...ErrorHandler) (*Metrics, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err != nil {
+		return nil, c.err
+	}
+	if len(handlers) == 0 {
+		handlers = append(handlers, errPassthrough)
+	}
+	var (
+		stats = &Metrics{
+			CPU: &CPUStat{
+				Throttling: &Throttle{},
+				Usage:      &CPUUsage{},
+			},
+		}
+		wg   = &sync.WaitGroup{}
+		errs = make(chan error, len(c.subsystems))
+	)
+	for _, s := range c.subsystems {
+		if ss, ok := s.(stater); ok {
+			sp, err := c.path(s.Name())
+			if err != nil {
+				return nil, err
+			}
+			wg.Add(1)
+			go func() {
+				defer wg.Done()
+				if err := ss.Stat(sp, stats); err != nil {
+					for _, eh := range handlers {
+						if herr := eh(err); herr != nil {
+							errs <- herr
+						}
+					}
+				}
+			}()
+		}
+	}
+	wg.Wait()
+	close(errs)
+	for err := range errs {
+		return nil, err
+	}
+	return stats, nil
+}
+
+// Update updates the cgroup with the new resource values provided
+//
+// Be prepared to handle EBUSY when trying to update a cgroup with
+// live processes and other operations like Stats being performed at the
+// same time
+func (c *cgroup) Update(resources *specs.LinuxResources) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err != nil {
+		return c.err
+	}
+	for _, s := range c.subsystems {
+		if u, ok := s.(updater); ok {
+			sp, err := c.path(s.Name())
+			if err != nil {
+				return err
+			}
+			if err := u.Update(sp, resources); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// Processes returns the processes running inside the cgroup along
+// with the subsystem used, pid, and path
+func (c *cgroup) Processes(subsystem Name, recursive bool) ([]Process, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err != nil {
+		return nil, c.err
+	}
+	return c.processes(subsystem, recursive)
+}
+
+func (c *cgroup) processes(subsystem Name, recursive bool) ([]Process, error) {
+	s := c.getSubsystem(subsystem)
+	sp, err := c.path(subsystem)
+	if err != nil {
+		return nil, err
+	}
+	path := s.(pather).Path(sp)
+	var processes []Process
+	err = filepath.Walk(path, func(p string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !recursive && info.IsDir() {
+			if p == path {
+				return nil
+			}
+			return filepath.SkipDir
+		}
+		dir, name := filepath.Split(p)
+		if name != cgroupProcs {
+			return nil
+		}
+		procs, err := readPids(dir, subsystem)
+		if err != nil {
+			return err
+		}
+		processes = append(processes, procs...)
+		return nil
+	})
+	return processes, err
+}
+
+// Freeze freezes the entire cgroup and all the processes inside it
+func (c *cgroup) Freeze() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err != nil {
+		return c.err
+	}
+	s := c.getSubsystem(Freezer)
+	if s == nil {
+		return ErrFreezerNotSupported
+	}
+	sp, err := c.path(Freezer)
+	if err != nil {
+		return err
+	}
+	return s.(*freezerController).Freeze(sp)
+}
+
+// Thaw thaws out the cgroup and all the processes inside it
+func (c *cgroup) Thaw() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err != nil {
+		return c.err
+	}
+	s := c.getSubsystem(Freezer)
+	if s == nil {
+		return ErrFreezerNotSupported
+	}
+	sp, err := c.path(Freezer)
+	if err != nil {
+		return err
+	}
+	return s.(*freezerController).Thaw(sp)
+}
+
+// OOMEventFD returns the memory cgroup's out of memory event fd that triggers
+// when processes inside the cgroup receive an oom event
+func (c *cgroup) OOMEventFD() (uintptr, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err != nil {
+		return 0, c.err
+	}
+	s := c.getSubsystem(Memory)
+	if s == nil {
+		return 0, ErrMemoryNotSupported
+	}
+	sp, err := c.path(Memory)
+	if err != nil {
+		return 0, err
+	}
+	return s.(*memoryController).OOMEventFD(sp)
+}
+
+// State returns the state of the cgroup and its processes
+func (c *cgroup) State() State {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	c.checkExists()
+	if c.err != nil && c.err == ErrCgroupDeleted {
+		return Deleted
+	}
+	s := c.getSubsystem(Freezer)
+	if s == nil {
+		return Thawed
+	}
+	sp, err := c.path(Freezer)
+	if err != nil {
+		return Unknown
+	}
+	state, err := s.(*freezerController).state(sp)
+	if err != nil {
+		return Unknown
+	}
+	return state
+}
+
+// MoveTo does a recursive move subsystem by subsystem of all the processes
+// inside the group
+func (c *cgroup) MoveTo(destination Cgroup) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.err != nil {
+		return c.err
+	}
+	for _, s := range c.subsystems {
+		processes, err := c.processes(s.Name(), true)
+		if err != nil {
+			return err
+		}
+		for _, p := range processes {
+			if err := destination.Add(p); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func (c *cgroup) getSubsystem(n Name) Subsystem {
+	for _, s := range c.subsystems {
+		if s.Name() == n {
+			return s
+		}
+	}
+	return nil
+}
+
+func (c *cgroup) checkExists() {
+	for _, s := range pathers(c.subsystems) {
+		p, err := c.path(s.Name())
+		if err != nil {
+			return
+		}
+		if _, err := os.Lstat(s.Path(p)); err != nil {
+			if os.IsNotExist(err) {
+				c.err = ErrCgroupDeleted
+				return
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/containerd/cgroups/control.go b/vendor/github.com/containerd/cgroups/control.go
new file mode 100644
index 0000000..2de2673
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/control.go
@@ -0,0 +1,58 @@
+package cgroups
+
+import (
+	"os"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
const (
	// cgroupProcs is the kernel-defined file name listing member pids of a
	// cgroup directory.
	cgroupProcs    = "cgroup.procs"
	// defaultDirPerm is the mode used when creating cgroup directories.
	defaultDirPerm = 0755
)

// defaultFilePerm is a var so that the test framework can change the filemode
// of all files created when the tests are running.  The difference between the
// tests and real world use is that files like "cgroup.procs" will exist when writing
// to a real cgroup filesystem and do not exist prior when running in the tests.
// this is set to a non 0 value in the test code
var defaultFilePerm = os.FileMode(0)
+
// Process identifies a single process inside one subsystem of a cgroup, as
// returned by Cgroup.Processes and consumed by Cgroup.Add.
type Process struct {
	// Subsystem is the name of the subsystem that the process is in
	Subsystem Name
	// Pid is the process id of the process
	Pid int
	// Path is the full path of the subsystem and location that the process is in
	Path string
}
+
// Cgroup handles interactions with the individual groups to perform
// actions on them as the main interface to this cgroup package
type Cgroup interface {
	// New creates a new cgroup under the calling cgroup
	New(string, *specs.LinuxResources) (Cgroup, error)
	// Add adds a process to the cgroup
	Add(Process) error
	// Delete removes the cgroup as a whole
	Delete() error
	// MoveTo moves all the processes under the calling cgroup to the provided one
	// subsystems are moved one at a time
	MoveTo(Cgroup) error
	// Stat returns the stats for all subsystems in the cgroup
	Stat(...ErrorHandler) (*Metrics, error)
	// Update updates all the subsystems with the provided resource changes
	Update(resources *specs.LinuxResources) error
	// Processes returns all the processes in a select subsystem for the cgroup
	Processes(Name, bool) ([]Process, error)
	// Freeze freezes or pauses all processes inside the cgroup
	Freeze() error
	// Thaw resumes all processes inside the cgroup
	Thaw() error
	// OOMEventFD returns the memory subsystem's event fd for OOM events
	OOMEventFD() (uintptr, error)
	// State returns the cgroup's current state
	State() State
	// Subsystems returns all the subsystems in the cgroup
	Subsystems() []Subsystem
}
diff --git a/vendor/github.com/containerd/cgroups/cpu.go b/vendor/github.com/containerd/cgroups/cpu.go
new file mode 100644
index 0000000..7df1b1c
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/cpu.go
@@ -0,0 +1,113 @@
+package cgroups
+
+import (
+	"bufio"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
// NewCpu returns a controller for the cpu subsystem rooted at root.
func NewCpu(root string) *cpuController {
	return &cpuController{
		root: filepath.Join(root, string(Cpu)),
	}
}

// cpuController manages the cpu cgroup subsystem (shares, CFS and RT
// bandwidth settings, throttling stats).
type cpuController struct {
	root string
}

// Name returns the subsystem name (Cpu).
func (c *cpuController) Name() Name {
	return Cpu
}

// Path returns the absolute path for the cgroup at path under this
// controller's root.
func (c *cpuController) Path(path string) string {
	return filepath.Join(c.root, path)
}
+
+func (c *cpuController) Create(path string, resources *specs.LinuxResources) error {
+	if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil {
+		return err
+	}
+	if cpu := resources.CPU; cpu != nil {
+		for _, t := range []struct {
+			name   string
+			ivalue *int64
+			uvalue *uint64
+		}{
+			{
+				name:   "rt_period_us",
+				uvalue: cpu.RealtimePeriod,
+			},
+			{
+				name:   "rt_runtime_us",
+				ivalue: cpu.RealtimeRuntime,
+			},
+			{
+				name:   "shares",
+				uvalue: cpu.Shares,
+			},
+			{
+				name:   "cfs_period_us",
+				uvalue: cpu.Period,
+			},
+			{
+				name:   "cfs_quota_us",
+				ivalue: cpu.Quota,
+			},
+		} {
+			var value []byte
+			if t.uvalue != nil {
+				value = []byte(strconv.FormatUint(*t.uvalue, 10))
+			} else if t.ivalue != nil {
+				value = []byte(strconv.FormatInt(*t.ivalue, 10))
+			}
+			if value != nil {
+				if err := ioutil.WriteFile(
+					filepath.Join(c.Path(path), fmt.Sprintf("cpu.%s", t.name)),
+					value,
+					defaultFilePerm,
+				); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
// Update re-applies the cpu resource settings; the cgroup files already
// exist, so Create's write loop performs the update in place.
func (c *cpuController) Update(path string, resources *specs.LinuxResources) error {
	return c.Create(path, resources)
}
+
+func (c *cpuController) Stat(path string, stats *Metrics) error {
+	f, err := os.Open(filepath.Join(c.Path(path), "cpu.stat"))
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	// get or create the cpu field because cpuacct can also set values on this struct
+	sc := bufio.NewScanner(f)
+	for sc.Scan() {
+		if err := sc.Err(); err != nil {
+			return err
+		}
+		key, v, err := parseKV(sc.Text())
+		if err != nil {
+			return err
+		}
+		switch key {
+		case "nr_periods":
+			stats.CPU.Throttling.Periods = v
+		case "nr_throttled":
+			stats.CPU.Throttling.ThrottledPeriods = v
+		case "throttled_time":
+			stats.CPU.Throttling.ThrottledTime = v
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/cgroups/cpuacct.go b/vendor/github.com/containerd/cgroups/cpuacct.go
new file mode 100644
index 0000000..443692d
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/cpuacct.go
@@ -0,0 +1,105 @@
+package cgroups
+
+import (
+	"fmt"
+	"io/ioutil"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
// nanosecondsInSecond converts clock-tick derived values to nanoseconds.
const nanosecondsInSecond = 1000000000

// clockTicks is read once at package init via getClockTicks (defined
// elsewhere in this package); presumably the system clock tick rate
// (USER_HZ) — confirm against that helper.
var clockTicks = getClockTicks()

// NewCpuacct returns a controller for the cpuacct (CPU accounting)
// subsystem rooted at root.
func NewCpuacct(root string) *cpuacctController {
	return &cpuacctController{
		root: filepath.Join(root, string(Cpuacct)),
	}
}

// cpuacctController reads CPU usage accounting from the cpuacct subsystem.
type cpuacctController struct {
	root string
}

// Name returns the subsystem name (Cpuacct).
func (c *cpuacctController) Name() Name {
	return Cpuacct
}

// Path returns the absolute path for the cgroup at path under this
// controller's root.
func (c *cpuacctController) Path(path string) string {
	return filepath.Join(c.root, path)
}
+
+func (c *cpuacctController) Stat(path string, stats *Metrics) error {
+	user, kernel, err := c.getUsage(path)
+	if err != nil {
+		return err
+	}
+	total, err := readUint(filepath.Join(c.Path(path), "cpuacct.usage"))
+	if err != nil {
+		return err
+	}
+	percpu, err := c.percpuUsage(path)
+	if err != nil {
+		return err
+	}
+	stats.CPU.Usage.Total = total
+	stats.CPU.Usage.User = user
+	stats.CPU.Usage.Kernel = kernel
+	stats.CPU.Usage.PerCPU = percpu
+	return nil
+}
+
+func (c *cpuacctController) percpuUsage(path string) ([]uint64, error) {
+	var usage []uint64
+	data, err := ioutil.ReadFile(filepath.Join(c.Path(path), "cpuacct.usage_percpu"))
+	if err != nil {
+		return nil, err
+	}
+	for _, v := range strings.Fields(string(data)) {
+		u, err := strconv.ParseUint(v, 10, 64)
+		if err != nil {
+			return nil, err
+		}
+		usage = append(usage, u)
+	}
+	return usage, nil
+}
+
+func (c *cpuacctController) getUsage(path string) (user uint64, kernel uint64, err error) {
+	statPath := filepath.Join(c.Path(path), "cpuacct.stat")
+	data, err := ioutil.ReadFile(statPath)
+	if err != nil {
+		return 0, 0, err
+	}
+	fields := strings.Fields(string(data))
+	if len(fields) != 4 {
+		return 0, 0, fmt.Errorf("%q is expected to have 4 fields", statPath)
+	}
+	for _, t := range []struct {
+		index int
+		name  string
+		value *uint64
+	}{
+		{
+			index: 0,
+			name:  "user",
+			value: &user,
+		},
+		{
+			index: 2,
+			name:  "system",
+			value: &kernel,
+		},
+	} {
+		if fields[t.index] != t.name {
+			return 0, 0, fmt.Errorf("expected field %q but found %q in %q", t.name, fields[t.index], statPath)
+		}
+		v, err := strconv.ParseUint(fields[t.index+1], 10, 64)
+		if err != nil {
+			return 0, 0, err
+		}
+		*t.value = v
+	}
+	return (user * nanosecondsInSecond) / clockTicks, (kernel * nanosecondsInSecond) / clockTicks, nil
+}
diff --git a/vendor/github.com/containerd/cgroups/cpuset.go b/vendor/github.com/containerd/cgroups/cpuset.go
new file mode 100644
index 0000000..f0f95f5
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/cpuset.go
@@ -0,0 +1,139 @@
+package cgroups
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
// NewCputset returns a controller for the cpuset subsystem rooted at root.
// NOTE(review): the name is misspelled (should be NewCpuset) but it is
// exported API, so it is kept for backward compatibility.
func NewCputset(root string) *cpusetController {
	return &cpusetController{
		root: filepath.Join(root, string(Cpuset)),
	}
}

// cpusetController manages the cpuset subsystem (CPU and memory node
// placement).
type cpusetController struct {
	root string
}

// Name returns the subsystem name (Cpuset).
func (c *cpusetController) Name() Name {
	return Cpuset
}

// Path returns the absolute path for the cgroup at path under this
// controller's root.
func (c *cpusetController) Path(path string) string {
	return filepath.Join(c.root, path)
}
+
+func (c *cpusetController) Create(path string, resources *specs.LinuxResources) error {
+	if err := c.ensureParent(c.Path(path), c.root); err != nil {
+		return err
+	}
+	if err := os.MkdirAll(c.Path(path), defaultDirPerm); err != nil {
+		return err
+	}
+	if err := c.copyIfNeeded(c.Path(path), filepath.Dir(c.Path(path))); err != nil {
+		return err
+	}
+	if resources.CPU != nil {
+		for _, t := range []struct {
+			name  string
+			value *string
+		}{
+			{
+				name:  "cpus",
+				value: &resources.CPU.Cpus,
+			},
+			{
+				name:  "mems",
+				value: &resources.CPU.Mems,
+			},
+		} {
+			if t.value != nil {
+				if err := ioutil.WriteFile(
+					filepath.Join(c.Path(path), fmt.Sprintf("cpuset.%s", t.name)),
+					[]byte(*t.value),
+					defaultFilePerm,
+				); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (c *cpusetController) getValues(path string) (cpus []byte, mems []byte, err error) {
+	if cpus, err = ioutil.ReadFile(filepath.Join(path, "cpuset.cpus")); err != nil && !os.IsNotExist(err) {
+		return
+	}
+	if mems, err = ioutil.ReadFile(filepath.Join(path, "cpuset.mems")); err != nil && !os.IsNotExist(err) {
+		return
+	}
+	return cpus, mems, nil
+}
+
// ensureParent makes sure that the parent directory of current is created
// and populated with the proper cpus and mems files copied from
// its parent.
func (c *cpusetController) ensureParent(current, root string) error {
	parent := filepath.Dir(current)
	// Best effort: if the parent cannot even be expressed relative to root,
	// silently treat it as outside our responsibility.
	if _, err := filepath.Rel(root, parent); err != nil {
		return nil
	}
	// Avoid infinite recursion.
	if parent == current {
		return fmt.Errorf("cpuset: cgroup parent path outside cgroup root")
	}
	// Recurse upward until the parent is the cpuset root itself, so every
	// ancestor gets its cpus/mems populated top-down.
	if cleanPath(parent) != root {
		if err := c.ensureParent(parent, root); err != nil {
			return err
		}
	}
	if err := os.MkdirAll(current, defaultDirPerm); err != nil {
		return err
	}
	return c.copyIfNeeded(current, parent)
}
+
+// copyIfNeeded copies the cpuset.cpus and cpuset.mems from the parent
+// directory to the current directory if the file's contents are 0
+func (c *cpusetController) copyIfNeeded(current, parent string) error {
+	var (
+		err                      error
+		currentCpus, currentMems []byte
+		parentCpus, parentMems   []byte
+	)
+	if currentCpus, currentMems, err = c.getValues(current); err != nil {
+		return err
+	}
+	if parentCpus, parentMems, err = c.getValues(parent); err != nil {
+		return err
+	}
+	if isEmpty(currentCpus) {
+		if err := ioutil.WriteFile(
+			filepath.Join(current, "cpuset.cpus"),
+			parentCpus,
+			defaultFilePerm,
+		); err != nil {
+			return err
+		}
+	}
+	if isEmpty(currentMems) {
+		if err := ioutil.WriteFile(
+			filepath.Join(current, "cpuset.mems"),
+			parentMems,
+			defaultFilePerm,
+		); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
// isEmpty reports whether b contains nothing but newlines (or nothing at
// all). Note that other whitespace, such as spaces, counts as content.
func isEmpty(b []byte) bool {
	trimmed := bytes.Trim(b, "\n")
	return len(trimmed) == 0
}
diff --git a/vendor/github.com/containerd/cgroups/devices.go b/vendor/github.com/containerd/cgroups/devices.go
new file mode 100644
index 0000000..f0dca5c
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/devices.go
@@ -0,0 +1,74 @@
+package cgroups
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
const (
	// allowDeviceFile / denyDeviceFile are the cgroup files that accept
	// device rule strings to whitelist or blacklist devices.
	allowDeviceFile = "devices.allow"
	denyDeviceFile  = "devices.deny"
	// wildcard is the sentinel major/minor number that renders as "*".
	wildcard        = -1
)

// NewDevices returns a controller for the devices subsystem rooted at root.
func NewDevices(root string) *devicesController {
	return &devicesController{
		root: filepath.Join(root, string(Devices)),
	}
}

// devicesController manages the devices cgroup subsystem (device access
// allow/deny rules).
type devicesController struct {
	root string
}

// Name returns the subsystem name (Devices).
func (d *devicesController) Name() Name {
	return Devices
}

// Path returns the absolute path for the cgroup at path under this
// controller's root.
func (d *devicesController) Path(path string) string {
	return filepath.Join(d.root, path)
}
+
+func (d *devicesController) Create(path string, resources *specs.LinuxResources) error {
+	if err := os.MkdirAll(d.Path(path), defaultDirPerm); err != nil {
+		return err
+	}
+	for _, device := range resources.Devices {
+		file := denyDeviceFile
+		if device.Allow {
+			file = allowDeviceFile
+		}
+		if err := ioutil.WriteFile(
+			filepath.Join(d.Path(path), file),
+			[]byte(deviceString(device)),
+			defaultFilePerm,
+		); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
// Update re-applies the device rules; appending rules to the existing
// devices.allow/devices.deny files performs the update.
func (d *devicesController) Update(path string, resources *specs.LinuxResources) error {
	return d.Create(path, resources)
}
+
+func deviceString(device specs.LinuxDeviceCgroup) string {
+	return fmt.Sprintf("%c %s:%s %s",
+		&device.Type,
+		deviceNumber(device.Major),
+		deviceNumber(device.Minor),
+		&device.Access,
+	)
+}
+
+func deviceNumber(number *int64) string {
+	if number == nil || *number == wildcard {
+		return "*"
+	}
+	return fmt.Sprint(*number)
+}
diff --git a/vendor/github.com/containerd/cgroups/errors.go b/vendor/github.com/containerd/cgroups/errors.go
new file mode 100644
index 0000000..d18b4b1
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/errors.go
@@ -0,0 +1,31 @@
+package cgroups
+
+import (
+	"errors"
+	"os"
+)
+
// Sentinel errors returned by this package. Callers compare against these
// directly, so the message strings must stay stable (including the
// ungrammatical ErrNoCgroupMountDestination text, kept as-is).
var (
	ErrInvalidPid               = errors.New("cgroups: pid must be greater than 0")
	ErrMountPointNotExist       = errors.New("cgroups: cgroup mountpoint does not exist")
	ErrInvalidFormat            = errors.New("cgroups: parsing file with invalid format failed")
	ErrFreezerNotSupported      = errors.New("cgroups: freezer cgroup not supported on this system")
	ErrMemoryNotSupported       = errors.New("cgroups: memory cgroup not supported on this system")
	ErrCgroupDeleted            = errors.New("cgroups: cgroup deleted")
	ErrNoCgroupMountDestination = errors.New("cgroups: cannot found cgroup mount destination")
)
+
// ErrorHandler is a function that handles and acts on errors; it may
// transform the error or swallow it by returning nil.
type ErrorHandler func(err error) error
+
+// IgnoreNotExist ignores any errors that are for not existing files
+func IgnoreNotExist(err error) error {
+	if os.IsNotExist(err) {
+		return nil
+	}
+	return err
+}
+
// errPassthrough is the default ErrorHandler: it returns err unchanged.
func errPassthrough(err error) error {
	return err
}
diff --git a/vendor/github.com/containerd/cgroups/freezer.go b/vendor/github.com/containerd/cgroups/freezer.go
new file mode 100644
index 0000000..0beefc6
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/freezer.go
@@ -0,0 +1,66 @@
+package cgroups
+
+import (
+	"io/ioutil"
+	"path/filepath"
+	"strings"
+	"time"
+)
+
// NewFreezer returns a controller for the freezer subsystem rooted at root.
func NewFreezer(root string) *freezerController {
	return &freezerController{
		root: filepath.Join(root, string(Freezer)),
	}
}

// freezerController manages the freezer subsystem, which pauses and resumes
// all tasks in a cgroup via the freezer.state file.
type freezerController struct {
	root string
}

// Name returns the subsystem name (Freezer).
func (f *freezerController) Name() Name {
	return Freezer
}

// Path returns the absolute path for the cgroup at path under this
// controller's root.
func (f *freezerController) Path(path string) string {
	return filepath.Join(f.root, path)
}
+
// Freeze pauses all tasks in the cgroup and blocks until the kernel reports
// the Frozen state (see waitState for the polling behaviour).
func (f *freezerController) Freeze(path string) error {
	return f.waitState(path, Frozen)
}

// Thaw resumes all tasks in the cgroup and blocks until the kernel reports
// the Thawed state.
func (f *freezerController) Thaw(path string) error {
	return f.waitState(path, Thawed)
}
+
+func (f *freezerController) changeState(path string, state State) error {
+	return ioutil.WriteFile(
+		filepath.Join(f.root, path, "freezer.state"),
+		[]byte(strings.ToUpper(string(state))),
+		defaultFilePerm,
+	)
+}
+
+func (f *freezerController) state(path string) (State, error) {
+	current, err := ioutil.ReadFile(filepath.Join(f.root, path, "freezer.state"))
+	if err != nil {
+		return "", err
+	}
+	return State(strings.ToLower(strings.TrimSpace(string(current)))), nil
+}
+
// waitState repeatedly writes the desired state and polls freezer.state
// until the kernel reports that state.
// NOTE(review): there is no timeout or cancellation here — if the cgroup can
// never reach the requested state this spins forever (sleeping 1ms per
// iteration); confirm callers tolerate that.
func (f *freezerController) waitState(path string, state State) error {
	for {
		// Re-issue the write on every iteration; repeating the same value
		// is harmless.
		if err := f.changeState(path, state); err != nil {
			return err
		}
		current, err := f.state(path)
		if err != nil {
			return err
		}
		if current == state {
			return nil
		}
		time.Sleep(1 * time.Millisecond)
	}
}
diff --git a/vendor/github.com/containerd/cgroups/hierarchy.go b/vendor/github.com/containerd/cgroups/hierarchy.go
new file mode 100644
index 0000000..b61660d
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/hierarchy.go
@@ -0,0 +1,4 @@
+package cgroups
+
// Hierarchy enables both unified and split hierarchy for cgroups: it is a
// constructor returning the set of Subsystems to operate on.
type Hierarchy func() ([]Subsystem, error)
diff --git a/vendor/github.com/containerd/cgroups/hugetlb.go b/vendor/github.com/containerd/cgroups/hugetlb.go
new file mode 100644
index 0000000..af75e1b
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/hugetlb.go
@@ -0,0 +1,93 @@
+package cgroups
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
// NewHugetlb returns a controller for the hugetlb subsystem rooted at root.
// It discovers the supported huge page sizes up front via hugePageSizes
// (defined elsewhere in this package) and fails if they cannot be read.
func NewHugetlb(root string) (*hugetlbController, error) {
	sizes, err := hugePageSizes()
	if err != nil {
		return nil, err
	}

	return &hugetlbController{
		root:  filepath.Join(root, string(Hugetlb)),
		sizes: sizes,
	}, nil
}

// hugetlbController manages the hugetlb subsystem (huge page limits and
// usage accounting per page size).
type hugetlbController struct {
	root  string
	// sizes holds the page-size strings used to build file names such as
	// "hugetlb.<size>.usage_in_bytes".
	sizes []string
}

// Name returns the subsystem name (Hugetlb).
func (h *hugetlbController) Name() Name {
	return Hugetlb
}

// Path returns the absolute path for the cgroup at path under this
// controller's root.
func (h *hugetlbController) Path(path string) string {
	return filepath.Join(h.root, path)
}
+
+func (h *hugetlbController) Create(path string, resources *specs.LinuxResources) error {
+	if err := os.MkdirAll(h.Path(path), defaultDirPerm); err != nil {
+		return err
+	}
+	for _, limit := range resources.HugepageLimits {
+		if err := ioutil.WriteFile(
+			filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", limit.Pagesize, "limit_in_bytes"}, ".")),
+			[]byte(strconv.FormatUint(limit.Limit, 10)),
+			defaultFilePerm,
+		); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (h *hugetlbController) Stat(path string, stats *Metrics) error {
+	for _, size := range h.sizes {
+		s, err := h.readSizeStat(path, size)
+		if err != nil {
+			return err
+		}
+		stats.Hugetlb = append(stats.Hugetlb, s)
+	}
+	return nil
+}
+
+func (h *hugetlbController) readSizeStat(path, size string) (*HugetlbStat, error) {
+	s := HugetlbStat{
+		Pagesize: size,
+	}
+	for _, t := range []struct {
+		name  string
+		value *uint64
+	}{
+		{
+			name:  "usage_in_bytes",
+			value: &s.Usage,
+		},
+		{
+			name:  "max_usage_in_bytes",
+			value: &s.Max,
+		},
+		{
+			name:  "failcnt",
+			value: &s.Failcnt,
+		},
+	} {
+		v, err := readUint(filepath.Join(h.Path(path), strings.Join([]string{"hugetlb", size, t.name}, ".")))
+		if err != nil {
+			return nil, err
+		}
+		*t.value = v
+	}
+	return &s, nil
+}
diff --git a/vendor/github.com/containerd/cgroups/memory.go b/vendor/github.com/containerd/cgroups/memory.go
new file mode 100644
index 0000000..3ecb0f7
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/memory.go
@@ -0,0 +1,309 @@
+package cgroups
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
// NewMemory returns a controller for the memory subsystem rooted at root.
func NewMemory(root string) *memoryController {
	return &memoryController{
		root: filepath.Join(root, string(Memory)),
	}
}

// memoryController manages the memory subsystem (limits, swap, kernel
// memory, OOM control and statistics).
type memoryController struct {
	root string
}

// Name returns the subsystem name (Memory).
func (m *memoryController) Name() Name {
	return Memory
}

// Path returns the absolute path for the cgroup at path under this
// controller's root.
func (m *memoryController) Path(path string) string {
	return filepath.Join(m.root, path)
}
+
// Create makes the memory cgroup directory for path and applies the memory
// settings from resources. When a kernel memory limit is requested it first
// probes that kmem accounting is usable on this cgroup.
func (m *memoryController) Create(path string, resources *specs.LinuxResources) error {
	if err := os.MkdirAll(m.Path(path), defaultDirPerm); err != nil {
		return err
	}
	if resources.Memory == nil {
		return nil
	}
	if resources.Memory.Kernel != nil {
		// Check if kernel memory is enabled
		// We have to limit the kernel memory here as it won't be accounted at all
		// until a limit is set on the cgroup and limit cannot be set once the
		// cgroup has children, or if there are already tasks in the cgroup.
		// Writing 1 then -1 enables accounting and immediately removes the
		// artificial limit again.
		for _, i := range []int64{1, -1} {
			if err := ioutil.WriteFile(
				filepath.Join(m.Path(path), "memory.kmem.limit_in_bytes"),
				[]byte(strconv.FormatInt(i, 10)),
				defaultFilePerm,
			); err != nil {
				// checkEBUSY rewrites EBUSY into a descriptive error.
				return checkEBUSY(err)
			}
		}
	}
	return m.set(path, getMemorySettings(resources))
}
+
// Update applies memory setting changes to an existing cgroup, reordering
// the limit and swap writes so the kernel's "swap limit >= memory limit"
// invariant is never violated mid-update.
func (m *memoryController) Update(path string, resources *specs.LinuxResources) error {
	if resources.Memory == nil {
		return nil
	}
	// g reports whether a limit value is set and positive.
	g := func(v *int64) bool {
		return v != nil && *v > 0
	}
	settings := getMemorySettings(resources)
	if g(resources.Memory.Limit) && g(resources.Memory.Swap) {
		// if the updated swap value is larger than the current memory limit set the swap changes first
		// then set the memory limit as swap must always be larger than the current limit
		current, err := readUint(filepath.Join(m.Path(path), "memory.limit_in_bytes"))
		if err != nil {
			return err
		}
		if current < uint64(*resources.Memory.Swap) {
			// Index 0 is limit_in_bytes and index 1 is memsw.limit_in_bytes;
			// this ordering is established by getMemorySettings.
			settings[0], settings[1] = settings[1], settings[0]
		}
	}
	return m.set(path, settings)
}
+
// Stat populates stats.Memory from memory.stat plus the usage/max/failcnt/
// limit counter files of the plain, memsw (swap), kmem and kmem.tcp
// accounting modules.
func (m *memoryController) Stat(path string, stats *Metrics) error {
	f, err := os.Open(filepath.Join(m.Path(path), "memory.stat"))
	if err != nil {
		return err
	}
	defer f.Close()
	stats.Memory = &MemoryStat{
		Usage:     &MemoryEntry{},
		Swap:      &MemoryEntry{},
		Kernel:    &MemoryEntry{},
		KernelTCP: &MemoryEntry{},
	}
	if err := m.parseStats(f, stats.Memory); err != nil {
		return err
	}
	// Each module contributes files named "memory[.<module>].<counter>";
	// the empty module string maps to the plain "memory.<counter>" files.
	for _, t := range []struct {
		module string
		entry  *MemoryEntry
	}{
		{
			module: "",
			entry:  stats.Memory.Usage,
		},
		{
			module: "memsw",
			entry:  stats.Memory.Swap,
		},
		{
			module: "kmem",
			entry:  stats.Memory.Kernel,
		},
		{
			module: "kmem.tcp",
			entry:  stats.Memory.KernelTCP,
		},
	} {
		for _, tt := range []struct {
			name  string
			value *uint64
		}{
			{
				name:  "usage_in_bytes",
				value: &t.entry.Usage,
			},
			{
				name:  "max_usage_in_bytes",
				value: &t.entry.Max,
			},
			{
				name:  "failcnt",
				value: &t.entry.Failcnt,
			},
			{
				name:  "limit_in_bytes",
				value: &t.entry.Limit,
			},
		} {
			parts := []string{"memory"}
			if t.module != "" {
				parts = append(parts, t.module)
			}
			parts = append(parts, tt.name)
			v, err := readUint(filepath.Join(m.Path(path), strings.Join(parts, ".")))
			if err != nil {
				return err
			}
			*tt.value = v
		}
	}
	return nil
}
+
// OOMEventFD creates an eventfd and registers it against the cgroup's
// memory.oom_control via cgroup.event_control, returning the fd. The caller
// owns the returned fd and is responsible for closing it.
func (m *memoryController) OOMEventFD(path string) (uintptr, error) {
	root := m.Path(path)
	f, err := os.Open(filepath.Join(root, "memory.oom_control"))
	if err != nil {
		return 0, err
	}
	defer f.Close()
	// Create a close-on-exec eventfd directly via the raw syscall.
	fd, _, serr := unix.RawSyscall(unix.SYS_EVENTFD2, 0, unix.EFD_CLOEXEC, 0)
	if serr != 0 {
		return 0, serr
	}
	// Register "<eventfd> <oom_control fd>" with the cgroup; close the
	// eventfd on failure so it does not leak.
	if err := writeEventFD(root, f.Fd(), fd); err != nil {
		unix.Close(int(fd))
		return 0, err
	}
	return fd, nil
}
+
// writeEventFD registers eventfd efd against control-file fd cfd by writing
// "<efd> <cfd>" to the cgroup's cgroup.event_control file.
func writeEventFD(root string, cfd, efd uintptr) error {
	control, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0)
	if err != nil {
		return err
	}
	defer control.Close()
	_, err = fmt.Fprintf(control, "%d %d", efd, cfd)
	return err
}
+
+func (m *memoryController) parseStats(r io.Reader, stat *MemoryStat) error {
+	var (
+		raw  = make(map[string]uint64)
+		sc   = bufio.NewScanner(r)
+		line int
+	)
+	for sc.Scan() {
+		if err := sc.Err(); err != nil {
+			return err
+		}
+		key, v, err := parseKV(sc.Text())
+		if err != nil {
+			return fmt.Errorf("%d: %v", line, err)
+		}
+		raw[key] = v
+		line++
+	}
+	stat.Cache = raw["cache"]
+	stat.RSS = raw["rss"]
+	stat.RSSHuge = raw["rss_huge"]
+	stat.MappedFile = raw["mapped_file"]
+	stat.Dirty = raw["dirty"]
+	stat.Writeback = raw["writeback"]
+	stat.PgPgIn = raw["pgpgin"]
+	stat.PgPgOut = raw["pgpgout"]
+	stat.PgFault = raw["pgfault"]
+	stat.PgMajFault = raw["pgmajfault"]
+	stat.InactiveAnon = raw["inactive_anon"]
+	stat.ActiveAnon = raw["active_anon"]
+	stat.InactiveFile = raw["inactive_file"]
+	stat.ActiveFile = raw["active_file"]
+	stat.Unevictable = raw["unevictable"]
+	stat.HierarchicalMemoryLimit = raw["hierarchical_memory_limit"]
+	stat.HierarchicalSwapLimit = raw["hierarchical_memsw_limit"]
+	stat.TotalCache = raw["total_cache"]
+	stat.TotalRSS = raw["total_rss"]
+	stat.TotalRSSHuge = raw["total_rss_huge"]
+	stat.TotalMappedFile = raw["total_mapped_file"]
+	stat.TotalDirty = raw["total_dirty"]
+	stat.TotalWriteback = raw["total_writeback"]
+	stat.TotalPgPgIn = raw["total_pgpgin"]
+	stat.TotalPgPgOut = raw["total_pgpgout"]
+	stat.TotalPgFault = raw["total_pgfault"]
+	stat.TotalPgMajFault = raw["total_pgmajfault"]
+	stat.TotalInactiveAnon = raw["total_inactive_anon"]
+	stat.TotalActiveAnon = raw["total_active_anon"]
+	stat.TotalInactiveFile = raw["total_inactive_file"]
+	stat.TotalActiveFile = raw["total_active_file"]
+	stat.TotalUnevictable = raw["total_unevictable"]
+	return nil
+}
+
+func (m *memoryController) set(path string, settings []memorySettings) error {
+	for _, t := range settings {
+		if t.value != nil {
+			if err := ioutil.WriteFile(
+				filepath.Join(m.Path(path), fmt.Sprintf("memory.%s", t.name)),
+				[]byte(strconv.FormatInt(*t.value, 10)),
+				defaultFilePerm,
+			); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
// memorySettings pairs a "memory.<name>" file suffix with an optional value;
// a nil value means "do not write this setting".
type memorySettings struct {
	name  string
	value *int64
}
+
+func getMemorySettings(resources *specs.LinuxResources) []memorySettings {
+	mem := resources.Memory
+	var swappiness *int64
+	if mem.Swappiness != nil {
+		v := int64(*mem.Swappiness)
+		swappiness = &v
+	}
+	return []memorySettings{
+		{
+			name:  "limit_in_bytes",
+			value: mem.Limit,
+		},
+		{
+			name:  "memsw.limit_in_bytes",
+			value: mem.Swap,
+		},
+		{
+			name:  "kmem.limit_in_bytes",
+			value: mem.Kernel,
+		},
+		{
+			name:  "kmem.tcp.limit_in_bytes",
+			value: mem.KernelTCP,
+		},
+		{
+			name:  "oom_control",
+			value: getOomControlValue(mem),
+		},
+		{
+			name:  "swappiness",
+			value: swappiness,
+		},
+	}
+}
+
+func checkEBUSY(err error) error {
+	if pathErr, ok := err.(*os.PathError); ok {
+		if errNo, ok := pathErr.Err.(syscall.Errno); ok {
+			if errNo == unix.EBUSY {
+				return fmt.Errorf(
+					"failed to set memory.kmem.limit_in_bytes, because either tasks have already joined this cgroup or it has children")
+			}
+		}
+	}
+	return err
+}
+
+func getOomControlValue(mem *specs.LinuxMemory) *int64 {
+	if mem.DisableOOMKiller != nil && *mem.DisableOOMKiller {
+		i := int64(1)
+		return &i
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/cgroups/metrics.pb.go b/vendor/github.com/containerd/cgroups/metrics.pb.go
new file mode 100644
index 0000000..c112558
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/metrics.pb.go
@@ -0,0 +1,3851 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/cgroups/metrics.proto
+// DO NOT EDIT!
+
+/*
+	Package cgroups is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/cgroups/metrics.proto
+
+	It has these top-level messages:
+		Metrics
+		HugetlbStat
+		PidsStat
+		CPUStat
+		CPUUsage
+		Throttle
+		MemoryStat
+		MemoryEntry
+		BlkIOStat
+		BlkIOEntry
+*/
+package cgroups
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type Metrics struct {
+	Hugetlb []*HugetlbStat `protobuf:"bytes,1,rep,name=hugetlb" json:"hugetlb,omitempty"`
+	Pids    *PidsStat      `protobuf:"bytes,2,opt,name=pids" json:"pids,omitempty"`
+	CPU     *CPUStat       `protobuf:"bytes,3,opt,name=cpu" json:"cpu,omitempty"`
+	Memory  *MemoryStat    `protobuf:"bytes,4,opt,name=memory" json:"memory,omitempty"`
+	Blkio   *BlkIOStat     `protobuf:"bytes,5,opt,name=blkio" json:"blkio,omitempty"`
+}
+
+func (m *Metrics) Reset()                    { *m = Metrics{} }
+func (*Metrics) ProtoMessage()               {}
+func (*Metrics) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} }
+
+type HugetlbStat struct {
+	Usage    uint64 `protobuf:"varint,1,opt,name=usage,proto3" json:"usage,omitempty"`
+	Max      uint64 `protobuf:"varint,2,opt,name=max,proto3" json:"max,omitempty"`
+	Failcnt  uint64 `protobuf:"varint,3,opt,name=failcnt,proto3" json:"failcnt,omitempty"`
+	Pagesize string `protobuf:"bytes,4,opt,name=pagesize,proto3" json:"pagesize,omitempty"`
+}
+
+func (m *HugetlbStat) Reset()                    { *m = HugetlbStat{} }
+func (*HugetlbStat) ProtoMessage()               {}
+func (*HugetlbStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{1} }
+
+type PidsStat struct {
+	Current uint64 `protobuf:"varint,1,opt,name=current,proto3" json:"current,omitempty"`
+	Limit   uint64 `protobuf:"varint,2,opt,name=limit,proto3" json:"limit,omitempty"`
+}
+
+func (m *PidsStat) Reset()                    { *m = PidsStat{} }
+func (*PidsStat) ProtoMessage()               {}
+func (*PidsStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{2} }
+
+type CPUStat struct {
+	Usage      *CPUUsage `protobuf:"bytes,1,opt,name=usage" json:"usage,omitempty"`
+	Throttling *Throttle `protobuf:"bytes,2,opt,name=throttling" json:"throttling,omitempty"`
+}
+
+func (m *CPUStat) Reset()                    { *m = CPUStat{} }
+func (*CPUStat) ProtoMessage()               {}
+func (*CPUStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{3} }
+
+type CPUUsage struct {
+	// values in nanoseconds
+	Total  uint64   `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
+	Kernel uint64   `protobuf:"varint,2,opt,name=kernel,proto3" json:"kernel,omitempty"`
+	User   uint64   `protobuf:"varint,3,opt,name=user,proto3" json:"user,omitempty"`
+	PerCPU []uint64 `protobuf:"varint,4,rep,packed,name=per_cpu,json=perCpu" json:"per_cpu,omitempty"`
+}
+
+func (m *CPUUsage) Reset()                    { *m = CPUUsage{} }
+func (*CPUUsage) ProtoMessage()               {}
+func (*CPUUsage) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{4} }
+
+type Throttle struct {
+	Periods          uint64 `protobuf:"varint,1,opt,name=periods,proto3" json:"periods,omitempty"`
+	ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods,proto3" json:"throttled_periods,omitempty"`
+	ThrottledTime    uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime,proto3" json:"throttled_time,omitempty"`
+}
+
+func (m *Throttle) Reset()                    { *m = Throttle{} }
+func (*Throttle) ProtoMessage()               {}
+func (*Throttle) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{5} }
+
+type MemoryStat struct {
+	Cache                   uint64       `protobuf:"varint,1,opt,name=cache,proto3" json:"cache,omitempty"`
+	RSS                     uint64       `protobuf:"varint,2,opt,name=rss,proto3" json:"rss,omitempty"`
+	RSSHuge                 uint64       `protobuf:"varint,3,opt,name=rss_huge,json=rssHuge,proto3" json:"rss_huge,omitempty"`
+	MappedFile              uint64       `protobuf:"varint,4,opt,name=mapped_file,json=mappedFile,proto3" json:"mapped_file,omitempty"`
+	Dirty                   uint64       `protobuf:"varint,5,opt,name=dirty,proto3" json:"dirty,omitempty"`
+	Writeback               uint64       `protobuf:"varint,6,opt,name=writeback,proto3" json:"writeback,omitempty"`
+	PgPgIn                  uint64       `protobuf:"varint,7,opt,name=pg_pg_in,json=pgPgIn,proto3" json:"pg_pg_in,omitempty"`
+	PgPgOut                 uint64       `protobuf:"varint,8,opt,name=pg_pg_out,json=pgPgOut,proto3" json:"pg_pg_out,omitempty"`
+	PgFault                 uint64       `protobuf:"varint,9,opt,name=pg_fault,json=pgFault,proto3" json:"pg_fault,omitempty"`
+	PgMajFault              uint64       `protobuf:"varint,10,opt,name=pg_maj_fault,json=pgMajFault,proto3" json:"pg_maj_fault,omitempty"`
+	InactiveAnon            uint64       `protobuf:"varint,11,opt,name=inactive_anon,json=inactiveAnon,proto3" json:"inactive_anon,omitempty"`
+	ActiveAnon              uint64       `protobuf:"varint,12,opt,name=active_anon,json=activeAnon,proto3" json:"active_anon,omitempty"`
+	InactiveFile            uint64       `protobuf:"varint,13,opt,name=inactive_file,json=inactiveFile,proto3" json:"inactive_file,omitempty"`
+	ActiveFile              uint64       `protobuf:"varint,14,opt,name=active_file,json=activeFile,proto3" json:"active_file,omitempty"`
+	Unevictable             uint64       `protobuf:"varint,15,opt,name=unevictable,proto3" json:"unevictable,omitempty"`
+	HierarchicalMemoryLimit uint64       `protobuf:"varint,16,opt,name=hierarchical_memory_limit,json=hierarchicalMemoryLimit,proto3" json:"hierarchical_memory_limit,omitempty"`
+	HierarchicalSwapLimit   uint64       `protobuf:"varint,17,opt,name=hierarchical_swap_limit,json=hierarchicalSwapLimit,proto3" json:"hierarchical_swap_limit,omitempty"`
+	TotalCache              uint64       `protobuf:"varint,18,opt,name=total_cache,json=totalCache,proto3" json:"total_cache,omitempty"`
+	TotalRSS                uint64       `protobuf:"varint,19,opt,name=total_rss,json=totalRss,proto3" json:"total_rss,omitempty"`
+	TotalRSSHuge            uint64       `protobuf:"varint,20,opt,name=total_rss_huge,json=totalRssHuge,proto3" json:"total_rss_huge,omitempty"`
+	TotalMappedFile         uint64       `protobuf:"varint,21,opt,name=total_mapped_file,json=totalMappedFile,proto3" json:"total_mapped_file,omitempty"`
+	TotalDirty              uint64       `protobuf:"varint,22,opt,name=total_dirty,json=totalDirty,proto3" json:"total_dirty,omitempty"`
+	TotalWriteback          uint64       `protobuf:"varint,23,opt,name=total_writeback,json=totalWriteback,proto3" json:"total_writeback,omitempty"`
+	TotalPgPgIn             uint64       `protobuf:"varint,24,opt,name=total_pg_pg_in,json=totalPgPgIn,proto3" json:"total_pg_pg_in,omitempty"`
+	TotalPgPgOut            uint64       `protobuf:"varint,25,opt,name=total_pg_pg_out,json=totalPgPgOut,proto3" json:"total_pg_pg_out,omitempty"`
+	TotalPgFault            uint64       `protobuf:"varint,26,opt,name=total_pg_fault,json=totalPgFault,proto3" json:"total_pg_fault,omitempty"`
+	TotalPgMajFault         uint64       `protobuf:"varint,27,opt,name=total_pg_maj_fault,json=totalPgMajFault,proto3" json:"total_pg_maj_fault,omitempty"`
+	TotalInactiveAnon       uint64       `protobuf:"varint,28,opt,name=total_inactive_anon,json=totalInactiveAnon,proto3" json:"total_inactive_anon,omitempty"`
+	TotalActiveAnon         uint64       `protobuf:"varint,29,opt,name=total_active_anon,json=totalActiveAnon,proto3" json:"total_active_anon,omitempty"`
+	TotalInactiveFile       uint64       `protobuf:"varint,30,opt,name=total_inactive_file,json=totalInactiveFile,proto3" json:"total_inactive_file,omitempty"`
+	TotalActiveFile         uint64       `protobuf:"varint,31,opt,name=total_active_file,json=totalActiveFile,proto3" json:"total_active_file,omitempty"`
+	TotalUnevictable        uint64       `protobuf:"varint,32,opt,name=total_unevictable,json=totalUnevictable,proto3" json:"total_unevictable,omitempty"`
+	Usage                   *MemoryEntry `protobuf:"bytes,33,opt,name=usage" json:"usage,omitempty"`
+	Swap                    *MemoryEntry `protobuf:"bytes,34,opt,name=swap" json:"swap,omitempty"`
+	Kernel                  *MemoryEntry `protobuf:"bytes,35,opt,name=kernel" json:"kernel,omitempty"`
+	KernelTCP               *MemoryEntry `protobuf:"bytes,36,opt,name=kernel_tcp,json=kernelTcp" json:"kernel_tcp,omitempty"`
+}
+
+func (m *MemoryStat) Reset()                    { *m = MemoryStat{} }
+func (*MemoryStat) ProtoMessage()               {}
+func (*MemoryStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{6} }
+
+type MemoryEntry struct {
+	Limit   uint64 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"`
+	Usage   uint64 `protobuf:"varint,2,opt,name=usage,proto3" json:"usage,omitempty"`
+	Max     uint64 `protobuf:"varint,3,opt,name=max,proto3" json:"max,omitempty"`
+	Failcnt uint64 `protobuf:"varint,4,opt,name=failcnt,proto3" json:"failcnt,omitempty"`
+}
+
+func (m *MemoryEntry) Reset()                    { *m = MemoryEntry{} }
+func (*MemoryEntry) ProtoMessage()               {}
+func (*MemoryEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{7} }
+
+type BlkIOStat struct {
+	IoServiceBytesRecursive []*BlkIOEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive" json:"io_service_bytes_recursive,omitempty"`
+	IoServicedRecursive     []*BlkIOEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive" json:"io_serviced_recursive,omitempty"`
+	IoQueuedRecursive       []*BlkIOEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive" json:"io_queued_recursive,omitempty"`
+	IoServiceTimeRecursive  []*BlkIOEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive" json:"io_service_time_recursive,omitempty"`
+	IoWaitTimeRecursive     []*BlkIOEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive" json:"io_wait_time_recursive,omitempty"`
+	IoMergedRecursive       []*BlkIOEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive" json:"io_merged_recursive,omitempty"`
+	IoTimeRecursive         []*BlkIOEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive" json:"io_time_recursive,omitempty"`
+	SectorsRecursive        []*BlkIOEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive" json:"sectors_recursive,omitempty"`
+}
+
+func (m *BlkIOStat) Reset()                    { *m = BlkIOStat{} }
+func (*BlkIOStat) ProtoMessage()               {}
+func (*BlkIOStat) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{8} }
+
+type BlkIOEntry struct {
+	Op     string `protobuf:"bytes,1,opt,name=op,proto3" json:"op,omitempty"`
+	Device string `protobuf:"bytes,2,opt,name=device,proto3" json:"device,omitempty"`
+	Major  uint64 `protobuf:"varint,3,opt,name=major,proto3" json:"major,omitempty"`
+	Minor  uint64 `protobuf:"varint,4,opt,name=minor,proto3" json:"minor,omitempty"`
+	Value  uint64 `protobuf:"varint,5,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *BlkIOEntry) Reset()                    { *m = BlkIOEntry{} }
+func (*BlkIOEntry) ProtoMessage()               {}
+func (*BlkIOEntry) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{9} }
+
+func init() {
+	proto.RegisterType((*Metrics)(nil), "io.containerd.cgroups.v1.Metrics")
+	proto.RegisterType((*HugetlbStat)(nil), "io.containerd.cgroups.v1.HugetlbStat")
+	proto.RegisterType((*PidsStat)(nil), "io.containerd.cgroups.v1.PidsStat")
+	proto.RegisterType((*CPUStat)(nil), "io.containerd.cgroups.v1.CPUStat")
+	proto.RegisterType((*CPUUsage)(nil), "io.containerd.cgroups.v1.CPUUsage")
+	proto.RegisterType((*Throttle)(nil), "io.containerd.cgroups.v1.Throttle")
+	proto.RegisterType((*MemoryStat)(nil), "io.containerd.cgroups.v1.MemoryStat")
+	proto.RegisterType((*MemoryEntry)(nil), "io.containerd.cgroups.v1.MemoryEntry")
+	proto.RegisterType((*BlkIOStat)(nil), "io.containerd.cgroups.v1.BlkIOStat")
+	proto.RegisterType((*BlkIOEntry)(nil), "io.containerd.cgroups.v1.BlkIOEntry")
+}
+func (m *Metrics) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Metrics) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Hugetlb) > 0 {
+		for _, msg := range m.Hugetlb {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if m.Pids != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Pids.Size()))
+		n1, err := m.Pids.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n1
+	}
+	if m.CPU != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.CPU.Size()))
+		n2, err := m.CPU.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n2
+	}
+	if m.Memory != nil {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Memory.Size()))
+		n3, err := m.Memory.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n3
+	}
+	if m.Blkio != nil {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Blkio.Size()))
+		n4, err := m.Blkio.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n4
+	}
+	return i, nil
+}
+
+func (m *HugetlbStat) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HugetlbStat) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Usage != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Usage))
+	}
+	if m.Max != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Max))
+	}
+	if m.Failcnt != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt))
+	}
+	if len(m.Pagesize) > 0 {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(len(m.Pagesize)))
+		i += copy(dAtA[i:], m.Pagesize)
+	}
+	return i, nil
+}
+
+func (m *PidsStat) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PidsStat) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Current != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Current))
+	}
+	if m.Limit != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Limit))
+	}
+	return i, nil
+}
+
+func (m *CPUStat) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CPUStat) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Usage != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size()))
+		n5, err := m.Usage.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n5
+	}
+	if m.Throttling != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Throttling.Size()))
+		n6, err := m.Throttling.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n6
+	}
+	return i, nil
+}
+
+func (m *CPUUsage) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CPUUsage) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Total != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Total))
+	}
+	if m.Kernel != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel))
+	}
+	if m.User != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.User))
+	}
+	if len(m.PerCPU) > 0 {
+		dAtA8 := make([]byte, len(m.PerCPU)*10)
+		var j7 int
+		for _, num := range m.PerCPU {
+			for num >= 1<<7 {
+				dAtA8[j7] = uint8(uint64(num)&0x7f | 0x80)
+				num >>= 7
+				j7++
+			}
+			dAtA8[j7] = uint8(num)
+			j7++
+		}
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(j7))
+		i += copy(dAtA[i:], dAtA8[:j7])
+	}
+	return i, nil
+}
+
+func (m *Throttle) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Throttle) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Periods != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Periods))
+	}
+	if m.ThrottledPeriods != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledPeriods))
+	}
+	if m.ThrottledTime != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.ThrottledTime))
+	}
+	return i, nil
+}
+
+func (m *MemoryStat) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemoryStat) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Cache != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Cache))
+	}
+	if m.RSS != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.RSS))
+	}
+	if m.RSSHuge != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.RSSHuge))
+	}
+	if m.MappedFile != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.MappedFile))
+	}
+	if m.Dirty != 0 {
+		dAtA[i] = 0x28
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Dirty))
+	}
+	if m.Writeback != 0 {
+		dAtA[i] = 0x30
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Writeback))
+	}
+	if m.PgPgIn != 0 {
+		dAtA[i] = 0x38
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgIn))
+	}
+	if m.PgPgOut != 0 {
+		dAtA[i] = 0x40
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.PgPgOut))
+	}
+	if m.PgFault != 0 {
+		dAtA[i] = 0x48
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.PgFault))
+	}
+	if m.PgMajFault != 0 {
+		dAtA[i] = 0x50
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.PgMajFault))
+	}
+	if m.InactiveAnon != 0 {
+		dAtA[i] = 0x58
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveAnon))
+	}
+	if m.ActiveAnon != 0 {
+		dAtA[i] = 0x60
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveAnon))
+	}
+	if m.InactiveFile != 0 {
+		dAtA[i] = 0x68
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.InactiveFile))
+	}
+	if m.ActiveFile != 0 {
+		dAtA[i] = 0x70
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.ActiveFile))
+	}
+	if m.Unevictable != 0 {
+		dAtA[i] = 0x78
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Unevictable))
+	}
+	if m.HierarchicalMemoryLimit != 0 {
+		dAtA[i] = 0x80
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalMemoryLimit))
+	}
+	if m.HierarchicalSwapLimit != 0 {
+		dAtA[i] = 0x88
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.HierarchicalSwapLimit))
+	}
+	if m.TotalCache != 0 {
+		dAtA[i] = 0x90
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalCache))
+	}
+	if m.TotalRSS != 0 {
+		dAtA[i] = 0x98
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSS))
+	}
+	if m.TotalRSSHuge != 0 {
+		dAtA[i] = 0xa0
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalRSSHuge))
+	}
+	if m.TotalMappedFile != 0 {
+		dAtA[i] = 0xa8
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalMappedFile))
+	}
+	if m.TotalDirty != 0 {
+		dAtA[i] = 0xb0
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalDirty))
+	}
+	if m.TotalWriteback != 0 {
+		dAtA[i] = 0xb8
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalWriteback))
+	}
+	if m.TotalPgPgIn != 0 {
+		dAtA[i] = 0xc0
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgIn))
+	}
+	if m.TotalPgPgOut != 0 {
+		dAtA[i] = 0xc8
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgPgOut))
+	}
+	if m.TotalPgFault != 0 {
+		dAtA[i] = 0xd0
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgFault))
+	}
+	if m.TotalPgMajFault != 0 {
+		dAtA[i] = 0xd8
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalPgMajFault))
+	}
+	if m.TotalInactiveAnon != 0 {
+		dAtA[i] = 0xe0
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveAnon))
+	}
+	if m.TotalActiveAnon != 0 {
+		dAtA[i] = 0xe8
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveAnon))
+	}
+	if m.TotalInactiveFile != 0 {
+		dAtA[i] = 0xf0
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalInactiveFile))
+	}
+	if m.TotalActiveFile != 0 {
+		dAtA[i] = 0xf8
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalActiveFile))
+	}
+	if m.TotalUnevictable != 0 {
+		dAtA[i] = 0x80
+		i++
+		dAtA[i] = 0x2
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.TotalUnevictable))
+	}
+	if m.Usage != nil {
+		dAtA[i] = 0x8a
+		i++
+		dAtA[i] = 0x2
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Usage.Size()))
+		n9, err := m.Usage.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n9
+	}
+	if m.Swap != nil {
+		dAtA[i] = 0x92
+		i++
+		dAtA[i] = 0x2
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Swap.Size()))
+		n10, err := m.Swap.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n10
+	}
+	if m.Kernel != nil {
+		dAtA[i] = 0x9a
+		i++
+		dAtA[i] = 0x2
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Kernel.Size()))
+		n11, err := m.Kernel.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n11
+	}
+	if m.KernelTCP != nil {
+		dAtA[i] = 0xa2
+		i++
+		dAtA[i] = 0x2
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.KernelTCP.Size()))
+		n12, err := m.KernelTCP.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n12
+	}
+	return i, nil
+}
+
+func (m *MemoryEntry) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemoryEntry) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Limit != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Limit))
+	}
+	if m.Usage != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Usage))
+	}
+	if m.Max != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Max))
+	}
+	if m.Failcnt != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Failcnt))
+	}
+	return i, nil
+}
+
+func (m *BlkIOStat) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BlkIOStat) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.IoServiceBytesRecursive) > 0 {
+		for _, msg := range m.IoServiceBytesRecursive {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if len(m.IoServicedRecursive) > 0 {
+		for _, msg := range m.IoServicedRecursive {
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if len(m.IoQueuedRecursive) > 0 {
+		for _, msg := range m.IoQueuedRecursive {
+			dAtA[i] = 0x1a
+			i++
+			i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if len(m.IoServiceTimeRecursive) > 0 {
+		for _, msg := range m.IoServiceTimeRecursive {
+			dAtA[i] = 0x22
+			i++
+			i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if len(m.IoWaitTimeRecursive) > 0 {
+		for _, msg := range m.IoWaitTimeRecursive {
+			dAtA[i] = 0x2a
+			i++
+			i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if len(m.IoMergedRecursive) > 0 {
+		for _, msg := range m.IoMergedRecursive {
+			dAtA[i] = 0x32
+			i++
+			i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if len(m.IoTimeRecursive) > 0 {
+		for _, msg := range m.IoTimeRecursive {
+			dAtA[i] = 0x3a
+			i++
+			i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if len(m.SectorsRecursive) > 0 {
+		for _, msg := range m.SectorsRecursive {
+			dAtA[i] = 0x42
+			i++
+			i = encodeVarintMetrics(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *BlkIOEntry) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BlkIOEntry) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Op) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(len(m.Op)))
+		i += copy(dAtA[i:], m.Op)
+	}
+	if len(m.Device) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(len(m.Device)))
+		i += copy(dAtA[i:], m.Device)
+	}
+	if m.Major != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Major))
+	}
+	if m.Minor != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Minor))
+	}
+	if m.Value != 0 {
+		dAtA[i] = 0x28
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Value))
+	}
+	return i, nil
+}
+
+func encodeFixed64Metrics(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Metrics(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Metrics) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Hugetlb) > 0 {
+		for _, e := range m.Hugetlb {
+			l = e.Size()
+			n += 1 + l + sovMetrics(uint64(l))
+		}
+	}
+	if m.Pids != nil {
+		l = m.Pids.Size()
+		n += 1 + l + sovMetrics(uint64(l))
+	}
+	if m.CPU != nil {
+		l = m.CPU.Size()
+		n += 1 + l + sovMetrics(uint64(l))
+	}
+	if m.Memory != nil {
+		l = m.Memory.Size()
+		n += 1 + l + sovMetrics(uint64(l))
+	}
+	if m.Blkio != nil {
+		l = m.Blkio.Size()
+		n += 1 + l + sovMetrics(uint64(l))
+	}
+	return n
+}
+
+func (m *HugetlbStat) Size() (n int) {
+	var l int
+	_ = l
+	if m.Usage != 0 {
+		n += 1 + sovMetrics(uint64(m.Usage))
+	}
+	if m.Max != 0 {
+		n += 1 + sovMetrics(uint64(m.Max))
+	}
+	if m.Failcnt != 0 {
+		n += 1 + sovMetrics(uint64(m.Failcnt))
+	}
+	l = len(m.Pagesize)
+	if l > 0 {
+		n += 1 + l + sovMetrics(uint64(l))
+	}
+	return n
+}
+
+func (m *PidsStat) Size() (n int) {
+	var l int
+	_ = l
+	if m.Current != 0 {
+		n += 1 + sovMetrics(uint64(m.Current))
+	}
+	if m.Limit != 0 {
+		n += 1 + sovMetrics(uint64(m.Limit))
+	}
+	return n
+}
+
+func (m *CPUStat) Size() (n int) {
+	var l int
+	_ = l
+	if m.Usage != nil {
+		l = m.Usage.Size()
+		n += 1 + l + sovMetrics(uint64(l))
+	}
+	if m.Throttling != nil {
+		l = m.Throttling.Size()
+		n += 1 + l + sovMetrics(uint64(l))
+	}
+	return n
+}
+
+func (m *CPUUsage) Size() (n int) {
+	var l int
+	_ = l
+	if m.Total != 0 {
+		n += 1 + sovMetrics(uint64(m.Total))
+	}
+	if m.Kernel != 0 {
+		n += 1 + sovMetrics(uint64(m.Kernel))
+	}
+	if m.User != 0 {
+		n += 1 + sovMetrics(uint64(m.User))
+	}
+	if len(m.PerCPU) > 0 {
+		l = 0
+		for _, e := range m.PerCPU {
+			l += sovMetrics(uint64(e))
+		}
+		n += 1 + sovMetrics(uint64(l)) + l
+	}
+	return n
+}
+
+func (m *Throttle) Size() (n int) {
+	var l int
+	_ = l
+	if m.Periods != 0 {
+		n += 1 + sovMetrics(uint64(m.Periods))
+	}
+	if m.ThrottledPeriods != 0 {
+		n += 1 + sovMetrics(uint64(m.ThrottledPeriods))
+	}
+	if m.ThrottledTime != 0 {
+		n += 1 + sovMetrics(uint64(m.ThrottledTime))
+	}
+	return n
+}
+
+func (m *MemoryStat) Size() (n int) {
+	var l int
+	_ = l
+	if m.Cache != 0 {
+		n += 1 + sovMetrics(uint64(m.Cache))
+	}
+	if m.RSS != 0 {
+		n += 1 + sovMetrics(uint64(m.RSS))
+	}
+	if m.RSSHuge != 0 {
+		n += 1 + sovMetrics(uint64(m.RSSHuge))
+	}
+	if m.MappedFile != 0 {
+		n += 1 + sovMetrics(uint64(m.MappedFile))
+	}
+	if m.Dirty != 0 {
+		n += 1 + sovMetrics(uint64(m.Dirty))
+	}
+	if m.Writeback != 0 {
+		n += 1 + sovMetrics(uint64(m.Writeback))
+	}
+	if m.PgPgIn != 0 {
+		n += 1 + sovMetrics(uint64(m.PgPgIn))
+	}
+	if m.PgPgOut != 0 {
+		n += 1 + sovMetrics(uint64(m.PgPgOut))
+	}
+	if m.PgFault != 0 {
+		n += 1 + sovMetrics(uint64(m.PgFault))
+	}
+	if m.PgMajFault != 0 {
+		n += 1 + sovMetrics(uint64(m.PgMajFault))
+	}
+	if m.InactiveAnon != 0 {
+		n += 1 + sovMetrics(uint64(m.InactiveAnon))
+	}
+	if m.ActiveAnon != 0 {
+		n += 1 + sovMetrics(uint64(m.ActiveAnon))
+	}
+	if m.InactiveFile != 0 {
+		n += 1 + sovMetrics(uint64(m.InactiveFile))
+	}
+	if m.ActiveFile != 0 {
+		n += 1 + sovMetrics(uint64(m.ActiveFile))
+	}
+	if m.Unevictable != 0 {
+		n += 1 + sovMetrics(uint64(m.Unevictable))
+	}
+	if m.HierarchicalMemoryLimit != 0 {
+		n += 2 + sovMetrics(uint64(m.HierarchicalMemoryLimit))
+	}
+	if m.HierarchicalSwapLimit != 0 {
+		n += 2 + sovMetrics(uint64(m.HierarchicalSwapLimit))
+	}
+	if m.TotalCache != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalCache))
+	}
+	if m.TotalRSS != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalRSS))
+	}
+	if m.TotalRSSHuge != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalRSSHuge))
+	}
+	if m.TotalMappedFile != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalMappedFile))
+	}
+	if m.TotalDirty != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalDirty))
+	}
+	if m.TotalWriteback != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalWriteback))
+	}
+	if m.TotalPgPgIn != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalPgPgIn))
+	}
+	if m.TotalPgPgOut != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalPgPgOut))
+	}
+	if m.TotalPgFault != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalPgFault))
+	}
+	if m.TotalPgMajFault != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalPgMajFault))
+	}
+	if m.TotalInactiveAnon != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalInactiveAnon))
+	}
+	if m.TotalActiveAnon != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalActiveAnon))
+	}
+	if m.TotalInactiveFile != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalInactiveFile))
+	}
+	if m.TotalActiveFile != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalActiveFile))
+	}
+	if m.TotalUnevictable != 0 {
+		n += 2 + sovMetrics(uint64(m.TotalUnevictable))
+	}
+	if m.Usage != nil {
+		l = m.Usage.Size()
+		n += 2 + l + sovMetrics(uint64(l))
+	}
+	if m.Swap != nil {
+		l = m.Swap.Size()
+		n += 2 + l + sovMetrics(uint64(l))
+	}
+	if m.Kernel != nil {
+		l = m.Kernel.Size()
+		n += 2 + l + sovMetrics(uint64(l))
+	}
+	if m.KernelTCP != nil {
+		l = m.KernelTCP.Size()
+		n += 2 + l + sovMetrics(uint64(l))
+	}
+	return n
+}
+
+func (m *MemoryEntry) Size() (n int) {
+	var l int
+	_ = l
+	if m.Limit != 0 {
+		n += 1 + sovMetrics(uint64(m.Limit))
+	}
+	if m.Usage != 0 {
+		n += 1 + sovMetrics(uint64(m.Usage))
+	}
+	if m.Max != 0 {
+		n += 1 + sovMetrics(uint64(m.Max))
+	}
+	if m.Failcnt != 0 {
+		n += 1 + sovMetrics(uint64(m.Failcnt))
+	}
+	return n
+}
+
+func (m *BlkIOStat) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.IoServiceBytesRecursive) > 0 {
+		for _, e := range m.IoServiceBytesRecursive {
+			l = e.Size()
+			n += 1 + l + sovMetrics(uint64(l))
+		}
+	}
+	if len(m.IoServicedRecursive) > 0 {
+		for _, e := range m.IoServicedRecursive {
+			l = e.Size()
+			n += 1 + l + sovMetrics(uint64(l))
+		}
+	}
+	if len(m.IoQueuedRecursive) > 0 {
+		for _, e := range m.IoQueuedRecursive {
+			l = e.Size()
+			n += 1 + l + sovMetrics(uint64(l))
+		}
+	}
+	if len(m.IoServiceTimeRecursive) > 0 {
+		for _, e := range m.IoServiceTimeRecursive {
+			l = e.Size()
+			n += 1 + l + sovMetrics(uint64(l))
+		}
+	}
+	if len(m.IoWaitTimeRecursive) > 0 {
+		for _, e := range m.IoWaitTimeRecursive {
+			l = e.Size()
+			n += 1 + l + sovMetrics(uint64(l))
+		}
+	}
+	if len(m.IoMergedRecursive) > 0 {
+		for _, e := range m.IoMergedRecursive {
+			l = e.Size()
+			n += 1 + l + sovMetrics(uint64(l))
+		}
+	}
+	if len(m.IoTimeRecursive) > 0 {
+		for _, e := range m.IoTimeRecursive {
+			l = e.Size()
+			n += 1 + l + sovMetrics(uint64(l))
+		}
+	}
+	if len(m.SectorsRecursive) > 0 {
+		for _, e := range m.SectorsRecursive {
+			l = e.Size()
+			n += 1 + l + sovMetrics(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *BlkIOEntry) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Op)
+	if l > 0 {
+		n += 1 + l + sovMetrics(uint64(l))
+	}
+	l = len(m.Device)
+	if l > 0 {
+		n += 1 + l + sovMetrics(uint64(l))
+	}
+	if m.Major != 0 {
+		n += 1 + sovMetrics(uint64(m.Major))
+	}
+	if m.Minor != 0 {
+		n += 1 + sovMetrics(uint64(m.Minor))
+	}
+	if m.Value != 0 {
+		n += 1 + sovMetrics(uint64(m.Value))
+	}
+	return n
+}
+
+func sovMetrics(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozMetrics(x uint64) (n int) {
+	return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Metrics) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Metrics{`,
+		`Hugetlb:` + strings.Replace(fmt.Sprintf("%v", this.Hugetlb), "HugetlbStat", "HugetlbStat", 1) + `,`,
+		`Pids:` + strings.Replace(fmt.Sprintf("%v", this.Pids), "PidsStat", "PidsStat", 1) + `,`,
+		`CPU:` + strings.Replace(fmt.Sprintf("%v", this.CPU), "CPUStat", "CPUStat", 1) + `,`,
+		`Memory:` + strings.Replace(fmt.Sprintf("%v", this.Memory), "MemoryStat", "MemoryStat", 1) + `,`,
+		`Blkio:` + strings.Replace(fmt.Sprintf("%v", this.Blkio), "BlkIOStat", "BlkIOStat", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *HugetlbStat) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&HugetlbStat{`,
+		`Usage:` + fmt.Sprintf("%v", this.Usage) + `,`,
+		`Max:` + fmt.Sprintf("%v", this.Max) + `,`,
+		`Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`,
+		`Pagesize:` + fmt.Sprintf("%v", this.Pagesize) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PidsStat) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PidsStat{`,
+		`Current:` + fmt.Sprintf("%v", this.Current) + `,`,
+		`Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CPUStat) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CPUStat{`,
+		`Usage:` + strings.Replace(fmt.Sprintf("%v", this.Usage), "CPUUsage", "CPUUsage", 1) + `,`,
+		`Throttling:` + strings.Replace(fmt.Sprintf("%v", this.Throttling), "Throttle", "Throttle", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CPUUsage) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CPUUsage{`,
+		`Total:` + fmt.Sprintf("%v", this.Total) + `,`,
+		`Kernel:` + fmt.Sprintf("%v", this.Kernel) + `,`,
+		`User:` + fmt.Sprintf("%v", this.User) + `,`,
+		`PerCPU:` + fmt.Sprintf("%v", this.PerCPU) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Throttle) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Throttle{`,
+		`Periods:` + fmt.Sprintf("%v", this.Periods) + `,`,
+		`ThrottledPeriods:` + fmt.Sprintf("%v", this.ThrottledPeriods) + `,`,
+		`ThrottledTime:` + fmt.Sprintf("%v", this.ThrottledTime) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MemoryStat) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&MemoryStat{`,
+		`Cache:` + fmt.Sprintf("%v", this.Cache) + `,`,
+		`RSS:` + fmt.Sprintf("%v", this.RSS) + `,`,
+		`RSSHuge:` + fmt.Sprintf("%v", this.RSSHuge) + `,`,
+		`MappedFile:` + fmt.Sprintf("%v", this.MappedFile) + `,`,
+		`Dirty:` + fmt.Sprintf("%v", this.Dirty) + `,`,
+		`Writeback:` + fmt.Sprintf("%v", this.Writeback) + `,`,
+		`PgPgIn:` + fmt.Sprintf("%v", this.PgPgIn) + `,`,
+		`PgPgOut:` + fmt.Sprintf("%v", this.PgPgOut) + `,`,
+		`PgFault:` + fmt.Sprintf("%v", this.PgFault) + `,`,
+		`PgMajFault:` + fmt.Sprintf("%v", this.PgMajFault) + `,`,
+		`InactiveAnon:` + fmt.Sprintf("%v", this.InactiveAnon) + `,`,
+		`ActiveAnon:` + fmt.Sprintf("%v", this.ActiveAnon) + `,`,
+		`InactiveFile:` + fmt.Sprintf("%v", this.InactiveFile) + `,`,
+		`ActiveFile:` + fmt.Sprintf("%v", this.ActiveFile) + `,`,
+		`Unevictable:` + fmt.Sprintf("%v", this.Unevictable) + `,`,
+		`HierarchicalMemoryLimit:` + fmt.Sprintf("%v", this.HierarchicalMemoryLimit) + `,`,
+		`HierarchicalSwapLimit:` + fmt.Sprintf("%v", this.HierarchicalSwapLimit) + `,`,
+		`TotalCache:` + fmt.Sprintf("%v", this.TotalCache) + `,`,
+		`TotalRSS:` + fmt.Sprintf("%v", this.TotalRSS) + `,`,
+		`TotalRSSHuge:` + fmt.Sprintf("%v", this.TotalRSSHuge) + `,`,
+		`TotalMappedFile:` + fmt.Sprintf("%v", this.TotalMappedFile) + `,`,
+		`TotalDirty:` + fmt.Sprintf("%v", this.TotalDirty) + `,`,
+		`TotalWriteback:` + fmt.Sprintf("%v", this.TotalWriteback) + `,`,
+		`TotalPgPgIn:` + fmt.Sprintf("%v", this.TotalPgPgIn) + `,`,
+		`TotalPgPgOut:` + fmt.Sprintf("%v", this.TotalPgPgOut) + `,`,
+		`TotalPgFault:` + fmt.Sprintf("%v", this.TotalPgFault) + `,`,
+		`TotalPgMajFault:` + fmt.Sprintf("%v", this.TotalPgMajFault) + `,`,
+		`TotalInactiveAnon:` + fmt.Sprintf("%v", this.TotalInactiveAnon) + `,`,
+		`TotalActiveAnon:` + fmt.Sprintf("%v", this.TotalActiveAnon) + `,`,
+		`TotalInactiveFile:` + fmt.Sprintf("%v", this.TotalInactiveFile) + `,`,
+		`TotalActiveFile:` + fmt.Sprintf("%v", this.TotalActiveFile) + `,`,
+		`TotalUnevictable:` + fmt.Sprintf("%v", this.TotalUnevictable) + `,`,
+		`Usage:` + strings.Replace(fmt.Sprintf("%v", this.Usage), "MemoryEntry", "MemoryEntry", 1) + `,`,
+		`Swap:` + strings.Replace(fmt.Sprintf("%v", this.Swap), "MemoryEntry", "MemoryEntry", 1) + `,`,
+		`Kernel:` + strings.Replace(fmt.Sprintf("%v", this.Kernel), "MemoryEntry", "MemoryEntry", 1) + `,`,
+		`KernelTCP:` + strings.Replace(fmt.Sprintf("%v", this.KernelTCP), "MemoryEntry", "MemoryEntry", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MemoryEntry) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&MemoryEntry{`,
+		`Limit:` + fmt.Sprintf("%v", this.Limit) + `,`,
+		`Usage:` + fmt.Sprintf("%v", this.Usage) + `,`,
+		`Max:` + fmt.Sprintf("%v", this.Max) + `,`,
+		`Failcnt:` + fmt.Sprintf("%v", this.Failcnt) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *BlkIOStat) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&BlkIOStat{`,
+		`IoServiceBytesRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServiceBytesRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
+		`IoServicedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServicedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
+		`IoQueuedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoQueuedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
+		`IoServiceTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoServiceTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
+		`IoWaitTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoWaitTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
+		`IoMergedRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoMergedRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
+		`IoTimeRecursive:` + strings.Replace(fmt.Sprintf("%v", this.IoTimeRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
+		`SectorsRecursive:` + strings.Replace(fmt.Sprintf("%v", this.SectorsRecursive), "BlkIOEntry", "BlkIOEntry", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *BlkIOEntry) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&BlkIOEntry{`,
+		`Op:` + fmt.Sprintf("%v", this.Op) + `,`,
+		`Device:` + fmt.Sprintf("%v", this.Device) + `,`,
+		`Major:` + fmt.Sprintf("%v", this.Major) + `,`,
+		`Minor:` + fmt.Sprintf("%v", this.Minor) + `,`,
+		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringMetrics(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Metrics) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Metrics: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Metrics: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hugetlb", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Hugetlb = append(m.Hugetlb, &HugetlbStat{})
+			if err := m.Hugetlb[len(m.Hugetlb)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pids", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Pids == nil {
+				m.Pids = &PidsStat{}
+			}
+			if err := m.Pids.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.CPU == nil {
+				m.CPU = &CPUStat{}
+			}
+			if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Memory == nil {
+				m.Memory = &MemoryStat{}
+			}
+			if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Blkio", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Blkio == nil {
+				m.Blkio = &BlkIOStat{}
+			}
+			if err := m.Blkio.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMetrics(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HugetlbStat) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HugetlbStat: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HugetlbStat: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType)
+			}
+			m.Usage = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Usage |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
+			}
+			m.Max = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Max |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType)
+			}
+			m.Failcnt = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Failcnt |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pagesize", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Pagesize = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMetrics(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PidsStat) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PidsStat: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PidsStat: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType)
+			}
+			m.Current = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Current |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+			}
+			m.Limit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Limit |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMetrics(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CPUStat) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CPUStat: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CPUStat: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Usage == nil {
+				m.Usage = &CPUUsage{}
+			}
+			if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Throttling", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Throttling == nil {
+				m.Throttling = &Throttle{}
+			}
+			if err := m.Throttling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMetrics(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CPUUsage) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CPUUsage: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CPUUsage: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
+			}
+			m.Total = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Total |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType)
+			}
+			m.Kernel = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Kernel |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			m.User = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.User |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowMetrics
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.PerCPU = append(m.PerCPU, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowMetrics
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= (int(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthMetrics
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowMetrics
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= (uint64(b) & 0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.PerCPU = append(m.PerCPU, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field PerCPU", wireType)
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMetrics(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Throttle) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Throttle: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Throttle: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Periods", wireType)
+			}
+			m.Periods = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Periods |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ThrottledPeriods", wireType)
+			}
+			m.ThrottledPeriods = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ThrottledPeriods |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ThrottledTime", wireType)
+			}
+			m.ThrottledTime = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ThrottledTime |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMetrics(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemoryStat) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemoryStat: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemoryStat: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Cache", wireType)
+			}
+			m.Cache = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Cache |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RSS", wireType)
+			}
+			m.RSS = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RSS |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RSSHuge", wireType)
+			}
+			m.RSSHuge = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RSSHuge |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MappedFile", wireType)
+			}
+			m.MappedFile = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MappedFile |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Dirty", wireType)
+			}
+			m.Dirty = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Dirty |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Writeback", wireType)
+			}
+			m.Writeback = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Writeback |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PgPgIn", wireType)
+			}
+			m.PgPgIn = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PgPgIn |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PgPgOut", wireType)
+			}
+			m.PgPgOut = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PgPgOut |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PgFault", wireType)
+			}
+			m.PgFault = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PgFault |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PgMajFault", wireType)
+			}
+			m.PgMajFault = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PgMajFault |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InactiveAnon", wireType)
+			}
+			m.InactiveAnon = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.InactiveAnon |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ActiveAnon", wireType)
+			}
+			m.ActiveAnon = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ActiveAnon |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InactiveFile", wireType)
+			}
+			m.InactiveFile = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.InactiveFile |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 14:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ActiveFile", wireType)
+			}
+			m.ActiveFile = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ActiveFile |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 15:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Unevictable", wireType)
+			}
+			m.Unevictable = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Unevictable |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 16:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalMemoryLimit", wireType)
+			}
+			m.HierarchicalMemoryLimit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.HierarchicalMemoryLimit |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 17:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field HierarchicalSwapLimit", wireType)
+			}
+			m.HierarchicalSwapLimit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.HierarchicalSwapLimit |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 18:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalCache", wireType)
+			}
+			m.TotalCache = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalCache |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 19:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalRSS", wireType)
+			}
+			m.TotalRSS = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalRSS |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 20:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalRSSHuge", wireType)
+			}
+			m.TotalRSSHuge = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalRSSHuge |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 21:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalMappedFile", wireType)
+			}
+			m.TotalMappedFile = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalMappedFile |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 22:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalDirty", wireType)
+			}
+			m.TotalDirty = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalDirty |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 23:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalWriteback", wireType)
+			}
+			m.TotalWriteback = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalWriteback |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 24:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgIn", wireType)
+			}
+			m.TotalPgPgIn = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalPgPgIn |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 25:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalPgPgOut", wireType)
+			}
+			m.TotalPgPgOut = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalPgPgOut |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 26:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalPgFault", wireType)
+			}
+			m.TotalPgFault = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalPgFault |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 27:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalPgMajFault", wireType)
+			}
+			m.TotalPgMajFault = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalPgMajFault |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 28:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveAnon", wireType)
+			}
+			m.TotalInactiveAnon = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalInactiveAnon |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 29:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveAnon", wireType)
+			}
+			m.TotalActiveAnon = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalActiveAnon |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 30:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalInactiveFile", wireType)
+			}
+			m.TotalInactiveFile = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalInactiveFile |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 31:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalActiveFile", wireType)
+			}
+			m.TotalActiveFile = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalActiveFile |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 32:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TotalUnevictable", wireType)
+			}
+			m.TotalUnevictable = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TotalUnevictable |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 33:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Usage == nil {
+				m.Usage = &MemoryEntry{}
+			}
+			if err := m.Usage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 34:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Swap", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Swap == nil {
+				m.Swap = &MemoryEntry{}
+			}
+			if err := m.Swap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 35:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kernel", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Kernel == nil {
+				m.Kernel = &MemoryEntry{}
+			}
+			if err := m.Kernel.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 36:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KernelTCP", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.KernelTCP == nil {
+				m.KernelTCP = &MemoryEntry{}
+			}
+			if err := m.KernelTCP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMetrics(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m,
+// overwriting any varint fields present in the input.
+// NOTE(review): this looks like protoc-gen-gogo generated output — prefer
+// regenerating from metrics.proto over hand-editing this function.
+func (m *MemoryEntry) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Base-128 varint decode of the field tag (fieldNum<<3 | wireType).
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		// Wire type 4 (end-group) may not appear at message top level.
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemoryEntry: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemoryEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Limit: varint (wire type 0).
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+			}
+			m.Limit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Limit |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			// Usage: varint.
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType)
+			}
+			m.Usage = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Usage |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			// Max: varint.
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType)
+			}
+			m.Max = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Max |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			// Failcnt: varint.
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Failcnt", wireType)
+			}
+			m.Failcnt = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Failcnt |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			// Unknown field: rewind to the tag and skip tag + payload.
+			iNdEx = preIndex
+			skippy, err := skipMetrics(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m. Every
+// field is a repeated length-delimited BlkIOEntry message (wire type 2);
+// each occurrence appends one decoded entry to the corresponding slice.
+// NOTE(review): this looks like protoc-gen-gogo generated output — prefer
+// regenerating from metrics.proto over hand-editing this function.
+func (m *BlkIOStat) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Base-128 varint decode of the field tag (fieldNum<<3 | wireType).
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		// Wire type 4 (end-group) may not appear at message top level.
+		if wireType == 4 {
+			return fmt.Errorf("proto: BlkIOStat: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BlkIOStat: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IoServiceBytesRecursive", wireType)
+			}
+			// Length prefix of the embedded message, as a varint.
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			// Append a fresh entry and decode the payload into it in place.
+			m.IoServiceBytesRecursive = append(m.IoServiceBytesRecursive, &BlkIOEntry{})
+			if err := m.IoServiceBytesRecursive[len(m.IoServiceBytesRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IoServicedRecursive", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IoServicedRecursive = append(m.IoServicedRecursive, &BlkIOEntry{})
+			if err := m.IoServicedRecursive[len(m.IoServicedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IoQueuedRecursive", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IoQueuedRecursive = append(m.IoQueuedRecursive, &BlkIOEntry{})
+			if err := m.IoQueuedRecursive[len(m.IoQueuedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IoServiceTimeRecursive", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IoServiceTimeRecursive = append(m.IoServiceTimeRecursive, &BlkIOEntry{})
+			if err := m.IoServiceTimeRecursive[len(m.IoServiceTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IoWaitTimeRecursive", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IoWaitTimeRecursive = append(m.IoWaitTimeRecursive, &BlkIOEntry{})
+			if err := m.IoWaitTimeRecursive[len(m.IoWaitTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IoMergedRecursive", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IoMergedRecursive = append(m.IoMergedRecursive, &BlkIOEntry{})
+			if err := m.IoMergedRecursive[len(m.IoMergedRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IoTimeRecursive", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.IoTimeRecursive = append(m.IoTimeRecursive, &BlkIOEntry{})
+			if err := m.IoTimeRecursive[len(m.IoTimeRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SectorsRecursive", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SectorsRecursive = append(m.SectorsRecursive, &BlkIOEntry{})
+			if err := m.SectorsRecursive[len(m.SectorsRecursive)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			// Unknown field: rewind to the tag and skip tag + payload.
+			iNdEx = preIndex
+			skippy, err := skipMetrics(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// Unmarshal decodes protobuf wire-format bytes from dAtA into m:
+// Op and Device are length-delimited strings; Major, Minor and Value
+// are uint64 varints.
+// NOTE(review): this looks like protoc-gen-gogo generated output — prefer
+// regenerating from metrics.proto over hand-editing this function.
+func (m *BlkIOEntry) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		// Base-128 varint decode of the field tag (fieldNum<<3 | wireType).
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		// Wire type 4 (end-group) may not appear at message top level.
+		if wireType == 4 {
+			return fmt.Errorf("proto: BlkIOEntry: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BlkIOEntry: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			// Op: length-delimited string.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Op", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			// A 64-bit length that wraps negative as int is rejected.
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Op = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			// Device: length-delimited string.
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Device", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Device = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			// Major: varint.
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Major", wireType)
+			}
+			m.Major = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Major |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			// Minor: varint.
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Minor", wireType)
+			}
+			m.Minor = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Minor |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			// Value: varint.
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			m.Value = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Value |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			// Unknown field: rewind to the tag and skip tag + payload.
+			iNdEx = preIndex
+			skippy, err := skipMetrics(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+// skipMetrics returns the total size in bytes (tag + payload) of the first
+// field encoded at the start of dAtA, so callers can skip unknown fields.
+// NOTE(review): generated helper — prefer regenerating over hand-editing.
+func skipMetrics(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		// Base-128 varint decode of the field tag.
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			// Varint payload: scan past continuation bytes.
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			// Fixed 64-bit payload.
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			// Length-delimited payload: varint length, then that many bytes.
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			// NOTE(review): length is only checked for negativity after being
+			// added to iNdEx, and there is no guard against iNdEx itself
+			// overflowing for a huge positive length — later gogo/protobuf
+			// generators also reject iNdEx < 0 here. Fix by regenerating with
+			// a newer generator rather than patching generated code by hand.
+			if length < 0 {
+				return 0, ErrInvalidLengthMetrics
+			}
+			return iNdEx, nil
+		case 3:
+			// Start-group: skip nested fields until the matching end-group.
+			// NOTE(review): this recursion has no depth limit; newer
+			// generators cap group-nesting depth — confirm generator version.
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowMetrics
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipMetrics(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			// Bare end-group tag: zero-length from the caller's view.
+			return iNdEx, nil
+		case 5:
+			// Fixed 32-bit payload.
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+// Sentinel errors returned by the generated Unmarshal methods and
+// skipMetrics in this file for malformed input.
+var (
+	ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowMetrics   = fmt.Errorf("proto: integer overflow")
+)
+
+// Registers the (compressed) file descriptor with the proto registry under
+// the original .proto import path.
+func init() { proto.RegisterFile("github.com/containerd/cgroups/metrics.proto", fileDescriptorMetrics) }
+
+var fileDescriptorMetrics = []byte{
+	// 1325 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x57, 0x4d, 0x6f, 0x1b, 0xb7,
+	0x16, 0x8d, 0xac, 0xb1, 0x3e, 0xae, 0x6c, 0xc7, 0xa6, 0x13, 0x67, 0xec, 0x97, 0x27, 0x29, 0xb2,
+	0xfd, 0x9e, 0x5b, 0x03, 0x32, 0x9a, 0x02, 0x41, 0x93, 0xa6, 0x28, 0x22, 0xb7, 0x41, 0x83, 0xd6,
+	0x88, 0x32, 0xb2, 0x91, 0x76, 0x35, 0x18, 0x8d, 0x98, 0x31, 0xe3, 0xd1, 0x70, 0xc2, 0xe1, 0xc8,
+	0x71, 0x57, 0xdd, 0xf5, 0x37, 0xf5, 0x1f, 0x64, 0xd9, 0x4d, 0x81, 0x76, 0x63, 0x34, 0xfa, 0x25,
+	0x05, 0x2f, 0xe7, 0x4b, 0x49, 0xdc, 0x40, 0xbb, 0xb9, 0xbc, 0xe7, 0x1c, 0x5e, 0x5e, 0x1e, 0x8a,
+	0x14, 0xec, 0x7b, 0x4c, 0x9e, 0xc6, 0xc3, 0xae, 0xcb, 0xc7, 0x07, 0x2e, 0x0f, 0xa4, 0xc3, 0x02,
+	0x2a, 0x46, 0x07, 0xae, 0x27, 0x78, 0x1c, 0x46, 0x07, 0x63, 0x2a, 0x05, 0x73, 0xa3, 0x6e, 0x28,
+	0xb8, 0xe4, 0xc4, 0x64, 0xbc, 0x9b, 0x83, 0xba, 0x09, 0xa8, 0x3b, 0xf9, 0x6c, 0xeb, 0x86, 0xc7,
+	0x3d, 0x8e, 0xa0, 0x03, 0xf5, 0xa5, 0xf1, 0x9d, 0xdf, 0x16, 0xa0, 0x7a, 0xa4, 0x15, 0xc8, 0xd7,
+	0x50, 0x3d, 0x8d, 0x3d, 0x2a, 0xfd, 0xa1, 0x59, 0x6a, 0x97, 0xf7, 0x1a, 0x77, 0x77, 0xbb, 0x57,
+	0xa9, 0x75, 0xbf, 0xd3, 0xc0, 0x81, 0x74, 0xa4, 0x95, 0xb2, 0xc8, 0x3d, 0x30, 0x42, 0x36, 0x8a,
+	0xcc, 0x85, 0x76, 0x69, 0xaf, 0x71, 0xb7, 0x73, 0x35, 0xbb, 0xcf, 0x46, 0x11, 0x52, 0x11, 0x4f,
+	0x1e, 0x42, 0xd9, 0x0d, 0x63, 0xb3, 0x8c, 0xb4, 0x3b, 0x57, 0xd3, 0x0e, 0xfb, 0x27, 0x8a, 0xd5,
+	0xab, 0x4e, 0x2f, 0x5b, 0xe5, 0xc3, 0xfe, 0x89, 0xa5, 0x68, 0xe4, 0x21, 0x54, 0xc6, 0x74, 0xcc,
+	0xc5, 0x85, 0x69, 0xa0, 0xc0, 0xce, 0xd5, 0x02, 0x47, 0x88, 0xc3, 0x99, 0x13, 0x0e, 0xb9, 0x0f,
+	0x8b, 0x43, 0xff, 0x8c, 0x71, 0x73, 0x11, 0xc9, 0xdb, 0x57, 0x93, 0x7b, 0xfe, 0xd9, 0x93, 0xa7,
+	0xc8, 0xd5, 0x8c, 0xce, 0x19, 0x34, 0x0a, 0x6d, 0x20, 0x37, 0x60, 0x31, 0x8e, 0x1c, 0x8f, 0x9a,
+	0xa5, 0x76, 0x69, 0xcf, 0xb0, 0x74, 0x40, 0x56, 0xa1, 0x3c, 0x76, 0x5e, 0x63, 0x4b, 0x0c, 0x4b,
+	0x7d, 0x12, 0x13, 0xaa, 0x2f, 0x1c, 0xe6, 0xbb, 0x81, 0xc4, 0x15, 0x1b, 0x56, 0x1a, 0x92, 0x2d,
+	0xa8, 0x85, 0x8e, 0x47, 0x23, 0xf6, 0x33, 0xc5, 0xb5, 0xd4, 0xad, 0x2c, 0xee, 0x3c, 0x80, 0x5a,
+	0xda, 0x35, 0xa5, 0xe0, 0xc6, 0x42, 0xd0, 0x40, 0x26, 0x73, 0xa5, 0xa1, 0xaa, 0xc1, 0x67, 0x63,
+	0x26, 0x93, 0xf9, 0x74, 0xd0, 0xf9, 0xb5, 0x04, 0xd5, 0xa4, 0x77, 0xe4, 0x8b, 0x62, 0x95, 0xff,
+	0xba, 0x49, 0x87, 0xfd, 0x93, 0x13, 0x85, 0x4c, 0x57, 0xd2, 0x03, 0x90, 0xa7, 0x82, 0x4b, 0xe9,
+	0xb3, 0xc0, 0xfb, 0xf8, 0x1e, 0x1f, 0x6b, 0x2c, 0xb5, 0x0a, 0xac, 0xce, 0x2b, 0xa8, 0xa5, 0xb2,
+	0xaa, 0x56, 0xc9, 0xa5, 0xe3, 0xa7, 0xfd, 0xc2, 0x80, 0x6c, 0x40, 0xe5, 0x8c, 0x8a, 0x80, 0xfa,
+	0xc9, 0x12, 0x92, 0x88, 0x10, 0x30, 0xe2, 0x88, 0x8a, 0xa4, 0x65, 0xf8, 0x4d, 0xb6, 0xa1, 0x1a,
+	0x52, 0x61, 0x2b, 0xef, 0x18, 0xed, 0xf2, 0x9e, 0xd1, 0x83, 0xe9, 0x65, 0xab, 0xd2, 0xa7, 0x42,
+	0x79, 0xa3, 0x12, 0x52, 0x71, 0x18, 0xc6, 0x9d, 0xd7, 0x50, 0x4b, 0x4b, 0x51, 0x8d, 0x0b, 0xa9,
+	0x60, 0x7c, 0x14, 0xa5, 0x8d, 0x4b, 0x42, 0xb2, 0x0f, 0x6b, 0x49, 0x99, 0x74, 0x64, 0xa7, 0x18,
+	0x5d, 0xc1, 0x6a, 0x96, 0xe8, 0x27, 0xe0, 0x5d, 0x58, 0xc9, 0xc1, 0x92, 0x8d, 0x69, 0x52, 0xd5,
+	0x72, 0x36, 0x7a, 0xcc, 0xc6, 0xb4, 0xf3, 0x57, 0x03, 0x20, 0x77, 0x9c, 0x5a, 0xaf, 0xeb, 0xb8,
+	0xa7, 0x99, 0x3f, 0x30, 0x20, 0x9b, 0x50, 0x16, 0x51, 0x32, 0x95, 0x36, 0xb6, 0x35, 0x18, 0x58,
+	0x6a, 0x8c, 0xfc, 0x0f, 0x6a, 0x22, 0x8a, 0x6c, 0x75, 0xba, 0xf4, 0x04, 0xbd, 0xc6, 0xf4, 0xb2,
+	0x55, 0xb5, 0x06, 0x03, 0x65, 0x3b, 0xab, 0x2a, 0xa2, 0x48, 0x7d, 0x90, 0x16, 0x34, 0xc6, 0x4e,
+	0x18, 0xd2, 0x91, 0xfd, 0x82, 0xf9, 0xda, 0x39, 0x86, 0x05, 0x7a, 0xe8, 0x31, 0xf3, 0xb1, 0xd3,
+	0x23, 0x26, 0xe4, 0x05, 0x7a, 0xdc, 0xb0, 0x74, 0x40, 0x6e, 0x43, 0xfd, 0x5c, 0x30, 0x49, 0x87,
+	0x8e, 0x7b, 0x66, 0x56, 0x30, 0x93, 0x0f, 0x10, 0x13, 0x6a, 0xa1, 0x67, 0x87, 0x9e, 0xcd, 0x02,
+	0xb3, 0xaa, 0x77, 0x22, 0xf4, 0xfa, 0xde, 0x93, 0x80, 0x6c, 0x41, 0x5d, 0x67, 0x78, 0x2c, 0xcd,
+	0x5a, 0xd2, 0x46, 0xaf, 0xef, 0x3d, 0x8d, 0x25, 0xd9, 0x44, 0xd6, 0x0b, 0x27, 0xf6, 0xa5, 0x59,
+	0x4f, 0x53, 0x8f, 0x55, 0x48, 0xda, 0xb0, 0x14, 0x7a, 0xf6, 0xd8, 0x79, 0x99, 0xa4, 0x41, 0x97,
+	0x19, 0x7a, 0x47, 0xce, 0x4b, 0x8d, 0xd8, 0x86, 0x65, 0x16, 0x38, 0xae, 0x64, 0x13, 0x6a, 0x3b,
+	0x01, 0x0f, 0xcc, 0x06, 0x42, 0x96, 0xd2, 0xc1, 0x47, 0x01, 0x0f, 0xd4, 0x62, 0x8b, 0x90, 0x25,
+	0xad, 0x52, 0x00, 0x14, 0x55, 0xb0, 0x1f, 0xcb, 0xb3, 0x2a, 0xd8, 0x91, 0x5c, 0x05, 0x21, 0x2b,
+	0x45, 0x15, 0x04, 0xb4, 0xa1, 0x11, 0x07, 0x74, 0xc2, 0x5c, 0xe9, 0x0c, 0x7d, 0x6a, 0x5e, 0x47,
+	0x40, 0x71, 0x88, 0x3c, 0x80, 0xcd, 0x53, 0x46, 0x85, 0x23, 0xdc, 0x53, 0xe6, 0x3a, 0xbe, 0xad,
+	0x7f, 0x4f, 0x6c, 0x7d, 0xfc, 0x56, 0x11, 0x7f, 0xab, 0x08, 0xd0, 0x4e, 0xf8, 0x41, 0xa5, 0xc9,
+	0x3d, 0x98, 0x49, 0xd9, 0xd1, 0xb9, 0x13, 0x26, 0xcc, 0x35, 0x64, 0xde, 0x2c, 0xa6, 0x07, 0xe7,
+	0x4e, 0xa8, 0x79, 0x2d, 0x68, 0xe0, 0x29, 0xb1, 0xb5, 0x91, 0x88, 0x2e, 0x1b, 0x87, 0x0e, 0xd1,
+	0x4d, 0x9f, 0x40, 0x5d, 0x03, 0x94, 0xa7, 0xd6, 0xd1, 0x33, 0x4b, 0xd3, 0xcb, 0x56, 0xed, 0x58,
+	0x0d, 0x2a, 0x63, 0xd5, 0x30, 0x6d, 0x45, 0x11, 0xb9, 0x07, 0x2b, 0x19, 0x54, 0x7b, 0xec, 0x06,
+	0xe2, 0x57, 0xa7, 0x97, 0xad, 0xa5, 0x14, 0x8f, 0x46, 0x5b, 0x4a, 0x39, 0xe8, 0xb6, 0x4f, 0x61,
+	0x4d, 0xf3, 0x8a, 0x9e, 0xbb, 0x89, 0x95, 0x5c, 0xc7, 0xc4, 0x51, 0x6e, 0xbc, 0xac, 0x5e, 0x6d,
+	0xbf, 0x8d, 0x42, 0xbd, 0xdf, 0xa0, 0x07, 0xff, 0x0f, 0x9a, 0x63, 0xe7, 0x4e, 0xbc, 0x85, 0x20,
+	0x5d, 0xdb, 0xf3, 0xcc, 0x8e, 0xdb, 0x69, 0xb5, 0x99, 0x29, 0x4d, 0xbd, 0x25, 0x38, 0xda, 0xd7,
+	0xce, 0xdc, 0x4d, 0xd5, 0x72, 0x7f, 0x6e, 0xea, 0xcd, 0xcf, 0x50, 0xca, 0xa4, 0x3b, 0x05, 0x2d,
+	0xed, 0xc5, 0xad, 0x19, 0x94, 0x76, 0xe3, 0x3e, 0x90, 0x0c, 0x95, 0xbb, 0xf6, 0x3f, 0x85, 0x85,
+	0xf6, 0x73, 0xeb, 0x76, 0x61, 0x5d, 0x83, 0x67, 0x0d, 0x7c, 0x1b, 0xd1, 0xba, 0x5f, 0x4f, 0x8a,
+	0x2e, 0xce, 0x9a, 0x58, 0x44, 0xff, 0xb7, 0xa0, 0xfd, 0x28, 0xc7, 0xbe, 0xaf, 0x8d, 0x2d, 0x6f,
+	0x7e, 0x40, 0x1b, 0x9b, 0xfe, 0xae, 0x36, 0xa2, 0x5b, 0xef, 0x69, 0x23, 0x76, 0x3f, 0xc5, 0x16,
+	0xcd, 0xde, 0x4e, 0x7e, 0xf6, 0x54, 0xe2, 0xa4, 0xe0, 0xf8, 0x2f, 0xd3, 0xab, 0xe3, 0x0e, 0xfe,
+	0xf6, 0xef, 0x7e, 0xec, 0x9e, 0xfd, 0x36, 0x90, 0xe2, 0x22, 0xbd, 0x3d, 0xee, 0x83, 0xa1, 0x5c,
+	0x6e, 0x76, 0xe6, 0xe1, 0x22, 0x85, 0x7c, 0x95, 0x5d, 0x09, 0xdb, 0xf3, 0x90, 0xd3, 0x9b, 0x63,
+	0x00, 0xa0, 0xbf, 0x6c, 0xe9, 0x86, 0xe6, 0xce, 0x1c, 0x12, 0xbd, 0xe5, 0xe9, 0x65, 0xab, 0xfe,
+	0x3d, 0x92, 0x8f, 0x0f, 0xfb, 0x56, 0x5d, 0xeb, 0x1c, 0xbb, 0x61, 0x87, 0x42, 0xa3, 0x00, 0xcc,
+	0xef, 0xdd, 0x52, 0xe1, 0xde, 0xcd, 0x5f, 0x04, 0x0b, 0x1f, 0x78, 0x11, 0x94, 0x3f, 0xf8, 0x22,
+	0x30, 0x66, 0x5e, 0x04, 0x9d, 0x3f, 0x16, 0xa1, 0x9e, 0xbd, 0x3b, 0x88, 0x03, 0x5b, 0x8c, 0xdb,
+	0x11, 0x15, 0x13, 0xe6, 0x52, 0x7b, 0x78, 0x21, 0x69, 0x64, 0x0b, 0xea, 0xc6, 0x22, 0x62, 0x13,
+	0x9a, 0xbc, 0xd9, 0x76, 0x3e, 0xf2, 0x80, 0xd1, 0xbd, 0xb9, 0xc5, 0xf8, 0x40, 0xcb, 0xf4, 0x94,
+	0x8a, 0x95, 0x8a, 0x90, 0x1f, 0xe1, 0x66, 0x3e, 0xc5, 0xa8, 0xa0, 0xbe, 0x30, 0x87, 0xfa, 0x7a,
+	0xa6, 0x3e, 0xca, 0x95, 0x8f, 0x61, 0x9d, 0x71, 0xfb, 0x55, 0x4c, 0xe3, 0x19, 0xdd, 0xf2, 0x1c,
+	0xba, 0x6b, 0x8c, 0x3f, 0x43, 0x7e, 0xae, 0x6a, 0xc3, 0x66, 0xa1, 0x25, 0xea, 0x2e, 0x2e, 0x68,
+	0x1b, 0x73, 0x68, 0x6f, 0x64, 0x35, 0xab, 0xbb, 0x3b, 0x9f, 0xe0, 0x27, 0xd8, 0x60, 0xdc, 0x3e,
+	0x77, 0x98, 0x7c, 0x57, 0x7d, 0x71, 0xbe, 0x8e, 0x3c, 0x77, 0x98, 0x9c, 0x95, 0xd6, 0x1d, 0x19,
+	0x53, 0xe1, 0xcd, 0x74, 0xa4, 0x32, 0x5f, 0x47, 0x8e, 0x90, 0x9f, 0xab, 0xf6, 0x61, 0x8d, 0xf1,
+	0x77, 0x6b, 0xad, 0xce, 0xa1, 0x79, 0x9d, 0xf1, 0xd9, 0x3a, 0x9f, 0xc1, 0x5a, 0x44, 0x5d, 0xc9,
+	0x45, 0xd1, 0x6d, 0xb5, 0x39, 0x14, 0x57, 0x13, 0x7a, 0x26, 0xd9, 0x99, 0x00, 0xe4, 0x79, 0xb2,
+	0x02, 0x0b, 0x3c, 0xc4, 0xa3, 0x53, 0xb7, 0x16, 0x78, 0xa8, 0xde, 0x80, 0x23, 0xf5, 0xb3, 0xa3,
+	0x0f, 0x4e, 0xdd, 0x4a, 0x22, 0x75, 0x9e, 0xc6, 0xce, 0x4b, 0x9e, 0x3e, 0x02, 0x75, 0x80, 0xa3,
+	0x2c, 0xe0, 0x22, 0x39, 0x3b, 0x3a, 0x50, 0xa3, 0x13, 0xc7, 0x8f, 0x69, 0xfa, 0xe6, 0xc1, 0xa0,
+	0x67, 0xbe, 0x79, 0xdb, 0xbc, 0xf6, 0xe7, 0xdb, 0xe6, 0xb5, 0x5f, 0xa6, 0xcd, 0xd2, 0x9b, 0x69,
+	0xb3, 0xf4, 0xfb, 0xb4, 0x59, 0xfa, 0x7b, 0xda, 0x2c, 0x0d, 0x2b, 0xf8, 0x7f, 0xe8, 0xf3, 0x7f,
+	0x02, 0x00, 0x00, 0xff, 0xff, 0xb2, 0x21, 0x0b, 0xcd, 0x6e, 0x0d, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/cgroups/metrics.proto b/vendor/github.com/containerd/cgroups/metrics.proto
new file mode 100644
index 0000000..8c3c3c4
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/metrics.proto
@@ -0,0 +1,111 @@
+syntax = "proto3";
+
+package io.containerd.cgroups.v1;
+
+import "gogoproto/gogo.proto";
+
+message Metrics {
+	repeated HugetlbStat hugetlb = 1;
+	PidsStat pids = 2;
+	CPUStat cpu = 3 [(gogoproto.customname) = "CPU"];
+	MemoryStat memory = 4;
+	BlkIOStat blkio = 5;
+}
+
+message HugetlbStat {
+	uint64 usage = 1;
+	uint64 max = 2;
+	uint64 failcnt = 3;
+	string pagesize = 4;
+}
+
+message PidsStat {
+	uint64 current = 1;
+	uint64 limit = 2;
+}
+
+message CPUStat {
+	CPUUsage usage = 1;
+	Throttle throttling = 2;
+}
+
+message CPUUsage {
+	// values in nanoseconds
+	uint64 total = 1;
+	uint64 kernel = 2;
+	uint64 user = 3;
+	repeated uint64 per_cpu = 4 [(gogoproto.customname) = "PerCPU"];
+
+}
+
+message Throttle {
+	uint64 periods = 1;
+	uint64 throttled_periods = 2;
+	uint64 throttled_time = 3;
+}
+
+message MemoryStat {
+	uint64 cache = 1;
+	uint64 rss = 2 [(gogoproto.customname) = "RSS"];
+	uint64 rss_huge = 3 [(gogoproto.customname) = "RSSHuge"];
+	uint64 mapped_file = 4;
+	uint64 dirty = 5;
+	uint64 writeback = 6;
+	uint64 pg_pg_in = 7;
+	uint64 pg_pg_out = 8;
+	uint64 pg_fault = 9;
+	uint64 pg_maj_fault = 10;
+	uint64 inactive_anon = 11;
+	uint64 active_anon = 12;
+	uint64 inactive_file = 13;
+	uint64 active_file = 14;
+	uint64 unevictable = 15;
+	uint64 hierarchical_memory_limit = 16;
+	uint64 hierarchical_swap_limit = 17;
+	uint64 total_cache = 18;
+	uint64 total_rss = 19 [(gogoproto.customname) = "TotalRSS"];
+	uint64 total_rss_huge = 20 [(gogoproto.customname) = "TotalRSSHuge"];
+	uint64 total_mapped_file = 21;
+	uint64 total_dirty = 22;
+	uint64 total_writeback = 23;
+	uint64 total_pg_pg_in = 24;
+	uint64 total_pg_pg_out = 25;
+	uint64 total_pg_fault = 26;
+	uint64 total_pg_maj_fault = 27;
+	uint64 total_inactive_anon = 28;
+	uint64 total_active_anon = 29;
+	uint64 total_inactive_file = 30;
+	uint64 total_active_file = 31;
+	uint64 total_unevictable = 32;
+	MemoryEntry usage = 33;
+	MemoryEntry swap = 34;
+	MemoryEntry kernel = 35;
+	MemoryEntry kernel_tcp = 36 [(gogoproto.customname) = "KernelTCP"];
+
+}
+
+message MemoryEntry {
+	uint64 limit = 1;
+	uint64 usage = 2;
+	uint64 max = 3;
+	uint64 failcnt = 4;
+}
+
+message BlkIOStat {
+	repeated BlkIOEntry io_service_bytes_recursive = 1;
+	repeated BlkIOEntry io_serviced_recursive = 2;
+	repeated BlkIOEntry io_queued_recursive = 3;
+	repeated BlkIOEntry io_service_time_recursive = 4;
+	repeated BlkIOEntry io_wait_time_recursive = 5;
+	repeated BlkIOEntry io_merged_recursive = 6;
+	repeated BlkIOEntry io_time_recursive = 7;
+	repeated BlkIOEntry sectors_recursive = 8;
+}
+
+message BlkIOEntry {
+	string op = 1;
+	string device = 2;
+	uint64 major = 3;
+	uint64 minor = 4;
+	uint64 value = 5;
+}
diff --git a/vendor/github.com/containerd/cgroups/named.go b/vendor/github.com/containerd/cgroups/named.go
new file mode 100644
index 0000000..f0fbf01
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/named.go
@@ -0,0 +1,23 @@
+package cgroups
+
+import "path/filepath"
+
+func NewNamed(root string, name Name) *namedController {
+	return &namedController{
+		root: root,
+		name: name,
+	}
+}
+
+type namedController struct {
+	root string
+	name Name
+}
+
+func (n *namedController) Name() Name {
+	return n.name
+}
+
+func (n *namedController) Path(path string) string {
+	return filepath.Join(n.root, string(n.name), path)
+}
diff --git a/vendor/github.com/containerd/cgroups/net_cls.go b/vendor/github.com/containerd/cgroups/net_cls.go
new file mode 100644
index 0000000..1558444
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/net_cls.go
@@ -0,0 +1,42 @@
+package cgroups
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func NewNetCls(root string) *netclsController {
+	return &netclsController{
+		root: filepath.Join(root, string(NetCLS)),
+	}
+}
+
+type netclsController struct {
+	root string
+}
+
+func (n *netclsController) Name() Name {
+	return NetCLS
+}
+
+func (n *netclsController) Path(path string) string {
+	return filepath.Join(n.root, path)
+}
+
+func (n *netclsController) Create(path string, resources *specs.LinuxResources) error {
+	if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil {
+		return err
+	}
+	if resources.Network != nil && resources.Network.ClassID != nil && *resources.Network.ClassID > 0 {
+		return ioutil.WriteFile(
+			filepath.Join(n.Path(path), "net_cls.classid"),
+			[]byte(strconv.FormatUint(uint64(*resources.Network.ClassID), 10)),
+			defaultFilePerm,
+		)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/cgroups/net_prio.go b/vendor/github.com/containerd/cgroups/net_prio.go
new file mode 100644
index 0000000..0959b8e
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/net_prio.go
@@ -0,0 +1,50 @@
+package cgroups
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func NewNetPrio(root string) *netprioController {
+	return &netprioController{
+		root: filepath.Join(root, string(NetPrio)),
+	}
+}
+
+type netprioController struct {
+	root string
+}
+
+func (n *netprioController) Name() Name {
+	return NetPrio
+}
+
+func (n *netprioController) Path(path string) string {
+	return filepath.Join(n.root, path)
+}
+
+func (n *netprioController) Create(path string, resources *specs.LinuxResources) error {
+	if err := os.MkdirAll(n.Path(path), defaultDirPerm); err != nil {
+		return err
+	}
+	if resources.Network != nil {
+		for _, prio := range resources.Network.Priorities {
+			if err := ioutil.WriteFile(
+				filepath.Join(n.Path(path), "net_prio_ifpriomap"),
+				formatPrio(prio.Name, prio.Priority),
+				defaultFilePerm,
+			); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func formatPrio(name string, prio uint32) []byte {
+	return []byte(fmt.Sprintf("%s %d", name, prio))
+}
diff --git a/vendor/github.com/containerd/cgroups/paths.go b/vendor/github.com/containerd/cgroups/paths.go
new file mode 100644
index 0000000..1afc24b
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/paths.go
@@ -0,0 +1,88 @@
+package cgroups
+
+import (
+	"fmt"
+	"path/filepath"
+
+	"github.com/pkg/errors"
+)
+
+type Path func(subsystem Name) (string, error)
+
+func RootPath(subsysem Name) (string, error) {
+	return "/", nil
+}
+
+// StaticPath returns a static path to use for all cgroups
+func StaticPath(path string) Path {
+	return func(_ Name) (string, error) {
+		return path, nil
+	}
+}
+
+// NestedPath will nest the cgroups based on the calling processes cgroup
+// placing its child processes inside its own path
+func NestedPath(suffix string) Path {
+	paths, err := parseCgroupFile("/proc/self/cgroup")
+	if err != nil {
+		return errorPath(err)
+	}
+	return existingPath(paths, suffix)
+}
+
+// PidPath will return the correct cgroup paths for an existing process running inside a cgroup
+// This is commonly used for the Load function to restore an existing container
+func PidPath(pid int) Path {
+	p := fmt.Sprintf("/proc/%d/cgroup", pid)
+	paths, err := parseCgroupFile(p)
+	if err != nil {
+		return errorPath(errors.Wrapf(err, "parse cgroup file %s", p))
+	}
+	return existingPath(paths, "")
+}
+
+func existingPath(paths map[string]string, suffix string) Path {
+	// localize the paths based on the root mount dest for nested cgroups
+	for n, p := range paths {
+		dest, err := getCgroupDestination(string(n))
+		if err != nil {
+			return errorPath(err)
+		}
+		rel, err := filepath.Rel(dest, p)
+		if err != nil {
+			return errorPath(err)
+		}
+		if rel == "." {
+			rel = dest
+		}
+		paths[n] = filepath.Join("/", rel)
+	}
+	return func(name Name) (string, error) {
+		root, ok := paths[string(name)]
+		if !ok {
+			if root, ok = paths[fmt.Sprintf("name=%s", name)]; !ok {
+				return "", fmt.Errorf("unable to find %q in controller set", name)
+			}
+		}
+		if suffix != "" {
+			return filepath.Join(root, suffix), nil
+		}
+		return root, nil
+	}
+}
+
+func subPath(path Path, subName string) Path {
+	return func(name Name) (string, error) {
+		p, err := path(name)
+		if err != nil {
+			return "", err
+		}
+		return filepath.Join(p, subName), nil
+	}
+}
+
+func errorPath(err error) Path {
+	return func(_ Name) (string, error) {
+		return "", err
+	}
+}
diff --git a/vendor/github.com/containerd/cgroups/perf_event.go b/vendor/github.com/containerd/cgroups/perf_event.go
new file mode 100644
index 0000000..0fa43ec
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/perf_event.go
@@ -0,0 +1,21 @@
+package cgroups
+
+import "path/filepath"
+
+func NewPerfEvent(root string) *PerfEventController {
+	return &PerfEventController{
+		root: filepath.Join(root, string(PerfEvent)),
+	}
+}
+
+type PerfEventController struct {
+	root string
+}
+
+func (p *PerfEventController) Name() Name {
+	return PerfEvent
+}
+
+func (p *PerfEventController) Path(path string) string {
+	return filepath.Join(p.root, path)
+}
diff --git a/vendor/github.com/containerd/cgroups/pids.go b/vendor/github.com/containerd/cgroups/pids.go
new file mode 100644
index 0000000..dcf4e29
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/pids.go
@@ -0,0 +1,69 @@
+package cgroups
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func NewPids(root string) *pidsController {
+	return &pidsController{
+		root: filepath.Join(root, string(Pids)),
+	}
+}
+
+type pidsController struct {
+	root string
+}
+
+func (p *pidsController) Name() Name {
+	return Pids
+}
+
+func (p *pidsController) Path(path string) string {
+	return filepath.Join(p.root, path)
+}
+
+func (p *pidsController) Create(path string, resources *specs.LinuxResources) error {
+	if err := os.MkdirAll(p.Path(path), defaultDirPerm); err != nil {
+		return err
+	}
+	if resources.Pids != nil && resources.Pids.Limit > 0 {
+		return ioutil.WriteFile(
+			filepath.Join(p.Path(path), "pids.max"),
+			[]byte(strconv.FormatInt(resources.Pids.Limit, 10)),
+			defaultFilePerm,
+		)
+	}
+	return nil
+}
+
+func (p *pidsController) Update(path string, resources *specs.LinuxResources) error {
+	return p.Create(path, resources)
+}
+
+func (p *pidsController) Stat(path string, stats *Metrics) error {
+	current, err := readUint(filepath.Join(p.Path(path), "pids.current"))
+	if err != nil {
+		return err
+	}
+	var max uint64
+	maxData, err := ioutil.ReadFile(filepath.Join(p.Path(path), "pids.max"))
+	if err != nil {
+		return err
+	}
+	if maxS := strings.TrimSpace(string(maxData)); maxS != "max" {
+		if max, err = parseUint(maxS, 10, 64); err != nil {
+			return err
+		}
+	}
+	stats.Pids = &PidsStat{
+		Current: current,
+		Limit:   max,
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/cgroups/state.go b/vendor/github.com/containerd/cgroups/state.go
new file mode 100644
index 0000000..f1d7b7b
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/state.go
@@ -0,0 +1,12 @@
+package cgroups
+
+// State is a type that represents the state of the current cgroup
+type State string
+
+const (
+	Unknown  State = ""
+	Thawed   State = "thawed"
+	Frozen   State = "frozen"
+	Freezing State = "freezing"
+	Deleted  State = "deleted"
+)
diff --git a/vendor/github.com/containerd/cgroups/subsystem.go b/vendor/github.com/containerd/cgroups/subsystem.go
new file mode 100644
index 0000000..9393eec
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/subsystem.go
@@ -0,0 +1,94 @@
+package cgroups
+
+import (
+	"fmt"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Name is a typed name for a cgroup subsystem
+type Name string
+
+const (
+	Devices   Name = "devices"
+	Hugetlb   Name = "hugetlb"
+	Freezer   Name = "freezer"
+	Pids      Name = "pids"
+	NetCLS    Name = "net_cls"
+	NetPrio   Name = "net_prio"
+	PerfEvent Name = "perf_event"
+	Cpuset    Name = "cpuset"
+	Cpu       Name = "cpu"
+	Cpuacct   Name = "cpuacct"
+	Memory    Name = "memory"
+	Blkio     Name = "blkio"
+)
+
+// Subsystems returns a complete list of the default cgroups
+// avaliable on most linux systems
+func Subsystems() []Name {
+	n := []Name{
+		Hugetlb,
+		Freezer,
+		Pids,
+		NetCLS,
+		NetPrio,
+		PerfEvent,
+		Cpuset,
+		Cpu,
+		Cpuacct,
+		Memory,
+		Blkio,
+	}
+	if !isUserNS {
+		n = append(n, Devices)
+	}
+	return n
+}
+
+type Subsystem interface {
+	Name() Name
+}
+
+type pather interface {
+	Subsystem
+	Path(path string) string
+}
+
+type creator interface {
+	Subsystem
+	Create(path string, resources *specs.LinuxResources) error
+}
+
+type deleter interface {
+	Subsystem
+	Delete(path string) error
+}
+
+type stater interface {
+	Subsystem
+	Stat(path string, stats *Metrics) error
+}
+
+type updater interface {
+	Subsystem
+	Update(path string, resources *specs.LinuxResources) error
+}
+
+// SingleSubsystem returns a single cgroup subsystem within the base Hierarchy
+func SingleSubsystem(baseHierarchy Hierarchy, subsystem Name) Hierarchy {
+	return func() ([]Subsystem, error) {
+		subsystems, err := baseHierarchy()
+		if err != nil {
+			return nil, err
+		}
+		for _, s := range subsystems {
+			if s.Name() == subsystem {
+				return []Subsystem{
+					s,
+				}, nil
+			}
+		}
+		return nil, fmt.Errorf("unable to find subsystem %s", subsystem)
+	}
+}
diff --git a/vendor/github.com/containerd/cgroups/systemd.go b/vendor/github.com/containerd/cgroups/systemd.go
new file mode 100644
index 0000000..fd816e3
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/systemd.go
@@ -0,0 +1,115 @@
+package cgroups
+
+import (
+	"fmt"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	systemdDbus "github.com/coreos/go-systemd/dbus"
+	"github.com/godbus/dbus"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const (
+	SystemdDbus  Name = "systemd"
+	defaultSlice      = "system.slice"
+)
+
+func Systemd() ([]Subsystem, error) {
+	root, err := v1MountPoint()
+	if err != nil {
+		return nil, err
+	}
+	defaultSubsystems, err := defaults(root)
+	if err != nil {
+		return nil, err
+	}
+	s, err := NewSystemd(root)
+	if err != nil {
+		return nil, err
+	}
+	// make sure the systemd controller is added first
+	return append([]Subsystem{s}, defaultSubsystems...), nil
+}
+
+func Slice(slice, name string) Path {
+	if slice == "" {
+		slice = defaultSlice
+	}
+	return func(subsystem Name) (string, error) {
+		return filepath.Join(slice, unitName(name)), nil
+	}
+}
+
+func NewSystemd(root string) (*SystemdController, error) {
+	return &SystemdController{
+		root: root,
+	}, nil
+}
+
+type SystemdController struct {
+	mu   sync.Mutex
+	root string
+}
+
+func (s *SystemdController) Name() Name {
+	return SystemdDbus
+}
+
+func (s *SystemdController) Create(path string, resources *specs.LinuxResources) error {
+	conn, err := systemdDbus.New()
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+	slice, name := splitName(path)
+	properties := []systemdDbus.Property{
+		systemdDbus.PropDescription(fmt.Sprintf("cgroup %s", name)),
+		systemdDbus.PropWants(slice),
+		newProperty("DefaultDependencies", false),
+		newProperty("Delegate", true),
+		newProperty("MemoryAccounting", true),
+		newProperty("CPUAccounting", true),
+		newProperty("BlockIOAccounting", true),
+	}
+	ch := make(chan string)
+	_, err = conn.StartTransientUnit(name, "replace", properties, ch)
+	if err != nil {
+		return err
+	}
+	<-ch
+	return nil
+}
+
+func (s *SystemdController) Delete(path string) error {
+	conn, err := systemdDbus.New()
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+	_, name := splitName(path)
+	ch := make(chan string)
+	_, err = conn.StopUnit(name, "replace", ch)
+	if err != nil {
+		return err
+	}
+	<-ch
+	return nil
+}
+
+func newProperty(name string, units interface{}) systemdDbus.Property {
+	return systemdDbus.Property{
+		Name:  name,
+		Value: dbus.MakeVariant(units),
+	}
+}
+
+func unitName(name string) string {
+	return fmt.Sprintf("%s.slice", name)
+}
+
+func splitName(path string) (slice string, unit string) {
+	slice, unit = filepath.Split(path)
+	return strings.TrimSuffix(slice, "/"), unit
+}
diff --git a/vendor/github.com/containerd/cgroups/ticks.go b/vendor/github.com/containerd/cgroups/ticks.go
new file mode 100644
index 0000000..f471c5a
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/ticks.go
@@ -0,0 +1,10 @@
+package cgroups
+
+func getClockTicks() uint64 {
+	// The value comes from `C.sysconf(C._SC_CLK_TCK)`, and
+	// on Linux it's a constant which is safe to be hard coded,
+	// so we can avoid using cgo here.
+	// See https://github.com/containerd/cgroups/pull/12 for
+	// more details.
+	return 100
+}
diff --git a/vendor/github.com/containerd/cgroups/utils.go b/vendor/github.com/containerd/cgroups/utils.go
new file mode 100644
index 0000000..d5c7230
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/utils.go
@@ -0,0 +1,280 @@
+package cgroups
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	units "github.com/docker/go-units"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+var isUserNS = runningInUserNS()
+
+// runningInUserNS detects whether we are currently running in a user namespace.
+// Copied from github.com/lxc/lxd/shared/util.go
+func runningInUserNS() bool {
+	file, err := os.Open("/proc/self/uid_map")
+	if err != nil {
+		// This kernel-provided file only exists if user namespaces are supported
+		return false
+	}
+	defer file.Close()
+
+	buf := bufio.NewReader(file)
+	l, _, err := buf.ReadLine()
+	if err != nil {
+		return false
+	}
+
+	line := string(l)
+	var a, b, c int64
+	fmt.Sscanf(line, "%d %d %d", &a, &b, &c)
+	/*
+	 * We assume we are in the initial user namespace if we have a full
+	 * range - 4294967295 uids starting at uid 0.
+	 */
+	if a == 0 && b == 0 && c == 4294967295 {
+		return false
+	}
+	return true
+}
+
+// defaults returns all known groups
+func defaults(root string) ([]Subsystem, error) {
+	h, err := NewHugetlb(root)
+	if err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	s := []Subsystem{
+		NewNamed(root, "systemd"),
+		NewFreezer(root),
+		NewPids(root),
+		NewNetCls(root),
+		NewNetPrio(root),
+		NewPerfEvent(root),
+		NewCputset(root),
+		NewCpu(root),
+		NewCpuacct(root),
+		NewMemory(root),
+		NewBlkio(root),
+	}
+	// only add the devices cgroup if we are not in a user namespace
+	// because modifications are not allowed
+	if !isUserNS {
+		s = append(s, NewDevices(root))
+	}
+	// add the hugetlb cgroup if error wasn't due to missing hugetlb
+	// cgroup support on the host
+	if err == nil {
+		s = append(s, h)
+	}
+	return s, nil
+}
+
+// remove will remove a cgroup path handling EAGAIN and EBUSY errors and
+// retrying the remove after a exp timeout
+func remove(path string) error {
+	delay := 10 * time.Millisecond
+	for i := 0; i < 5; i++ {
+		if i != 0 {
+			time.Sleep(delay)
+			delay *= 2
+		}
+		if err := os.RemoveAll(path); err == nil {
+			return nil
+		}
+	}
+	return fmt.Errorf("cgroups: unable to remove path %q", path)
+}
+
+// readPids will read all the pids in a cgroup by the provided path
+func readPids(path string, subsystem Name) ([]Process, error) {
+	f, err := os.Open(filepath.Join(path, cgroupProcs))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	var (
+		out []Process
+		s   = bufio.NewScanner(f)
+	)
+	for s.Scan() {
+		if t := s.Text(); t != "" {
+			pid, err := strconv.Atoi(t)
+			if err != nil {
+				return nil, err
+			}
+			out = append(out, Process{
+				Pid:       pid,
+				Subsystem: subsystem,
+				Path:      path,
+			})
+		}
+	}
+	return out, nil
+}
+
+func hugePageSizes() ([]string, error) {
+	var (
+		pageSizes []string
+		sizeList  = []string{"B", "kB", "MB", "GB", "TB", "PB"}
+	)
+	files, err := ioutil.ReadDir("/sys/kernel/mm/hugepages")
+	if err != nil {
+		return nil, err
+	}
+	for _, st := range files {
+		nameArray := strings.Split(st.Name(), "-")
+		pageSize, err := units.RAMInBytes(nameArray[1])
+		if err != nil {
+			return nil, err
+		}
+		pageSizes = append(pageSizes, units.CustomSize("%g%s", float64(pageSize), 1024.0, sizeList))
+	}
+	return pageSizes, nil
+}
+
+func readUint(path string) (uint64, error) {
+	v, err := ioutil.ReadFile(path)
+	if err != nil {
+		return 0, err
+	}
+	return parseUint(strings.TrimSpace(string(v)), 10, 64)
+}
+
+func parseUint(s string, base, bitSize int) (uint64, error) {
+	v, err := strconv.ParseUint(s, base, bitSize)
+	if err != nil {
+		intValue, intErr := strconv.ParseInt(s, base, bitSize)
+		// 1. Handle negative values greater than MinInt64 (and)
+		// 2. Handle negative values lesser than MinInt64
+		if intErr == nil && intValue < 0 {
+			return 0, nil
+		} else if intErr != nil &&
+			intErr.(*strconv.NumError).Err == strconv.ErrRange &&
+			intValue < 0 {
+			return 0, nil
+		}
+		return 0, err
+	}
+	return v, nil
+}
+
+func parseKV(raw string) (string, uint64, error) {
+	parts := strings.Fields(raw)
+	switch len(parts) {
+	case 2:
+		v, err := parseUint(parts[1], 10, 64)
+		if err != nil {
+			return "", 0, err
+		}
+		return parts[0], v, nil
+	default:
+		return "", 0, ErrInvalidFormat
+	}
+}
+
+func parseCgroupFile(path string) (map[string]string, error) {
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	return parseCgroupFromReader(f)
+}
+
+func parseCgroupFromReader(r io.Reader) (map[string]string, error) {
+	var (
+		cgroups = make(map[string]string)
+		s       = bufio.NewScanner(r)
+	)
+	for s.Scan() {
+		if err := s.Err(); err != nil {
+			return nil, err
+		}
+		var (
+			text  = s.Text()
+			parts = strings.SplitN(text, ":", 3)
+		)
+		if len(parts) < 3 {
+			return nil, fmt.Errorf("invalid cgroup entry: %q", text)
+		}
+		for _, subs := range strings.Split(parts[1], ",") {
+			if subs != "" {
+				cgroups[subs] = parts[2]
+			}
+		}
+	}
+	return cgroups, nil
+}
+
+func getCgroupDestination(subsystem string) (string, error) {
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	s := bufio.NewScanner(f)
+	for s.Scan() {
+		if err := s.Err(); err != nil {
+			return "", err
+		}
+		fields := strings.Fields(s.Text())
+		for _, opt := range strings.Split(fields[len(fields)-1], ",") {
+			if opt == subsystem {
+				return fields[3], nil
+			}
+		}
+	}
+	return "", ErrNoCgroupMountDestination
+}
+
+func pathers(subystems []Subsystem) []pather {
+	var out []pather
+	for _, s := range subystems {
+		if p, ok := s.(pather); ok {
+			out = append(out, p)
+		}
+	}
+	return out
+}
+
+func initializeSubsystem(s Subsystem, path Path, resources *specs.LinuxResources) error {
+	if c, ok := s.(creator); ok {
+		p, err := path(s.Name())
+		if err != nil {
+			return err
+		}
+		if err := c.Create(p, resources); err != nil {
+			return err
+		}
+	} else if c, ok := s.(pather); ok {
+		p, err := path(s.Name())
+		if err != nil {
+			return err
+		}
+		// do the default create if the group does not have a custom one
+		if err := os.MkdirAll(c.Path(p), defaultDirPerm); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func cleanPath(path string) string {
+	if path == "" {
+		return ""
+	}
+	path = filepath.Clean(path)
+	if !filepath.IsAbs(path) {
+		path, _ = filepath.Rel(string(os.PathSeparator), filepath.Clean(string(os.PathSeparator)+path))
+	}
+	return filepath.Clean(path)
+}
diff --git a/vendor/github.com/containerd/cgroups/v1.go b/vendor/github.com/containerd/cgroups/v1.go
new file mode 100644
index 0000000..f6608f7
--- /dev/null
+++ b/vendor/github.com/containerd/cgroups/v1.go
@@ -0,0 +1,65 @@
+package cgroups
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+)
+
+// V1 returns all the groups in the default cgroups mountpoint in a single hierarchy
+func V1() ([]Subsystem, error) {
+	root, err := v1MountPoint()
+	if err != nil {
+		return nil, err
+	}
+	subsystems, err := defaults(root)
+	if err != nil {
+		return nil, err
+	}
+	var enabled []Subsystem
+	for _, s := range pathers(subsystems) {
+		// check and remove the default groups that do not exist
+		if _, err := os.Lstat(s.Path("/")); err == nil {
+			enabled = append(enabled, s)
+		}
+	}
+	return enabled, nil
+}
+
+// v1MountPoint returns the mount point where the cgroup
+// mountpoints are mounted in a single hierarchy
+func v1MountPoint() (string, error) {
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return "", err
+	}
+	defer f.Close()
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		if err := scanner.Err(); err != nil {
+			return "", err
+		}
+		var (
+			text   = scanner.Text()
+			fields = strings.Split(text, " ")
+			// safe as mountinfo encodes mountpoints with spaces as \040.
+			index               = strings.Index(text, " - ")
+			postSeparatorFields = strings.Fields(text[index+3:])
+			numPostFields       = len(postSeparatorFields)
+		)
+		// this is an error as we can't detect if the mount is for "cgroup"
+		if numPostFields == 0 {
+			return "", fmt.Errorf("Found no fields post '-' in %q", text)
+		}
+		if postSeparatorFields[0] == "cgroup" {
+			// check that the mount is properly formatted.
+			if numPostFields < 3 {
+				return "", fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+			}
+			return filepath.Dir(fields[4]), nil
+		}
+	}
+	return "", ErrMountPointNotExist
+}
diff --git a/vendor/github.com/containerd/console/LICENSE b/vendor/github.com/containerd/console/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/containerd/console/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containerd/console/README.md b/vendor/github.com/containerd/console/README.md
new file mode 100644
index 0000000..4c56d9d
--- /dev/null
+++ b/vendor/github.com/containerd/console/README.md
@@ -0,0 +1,17 @@
+# console
+
+[![Build Status](https://travis-ci.org/containerd/console.svg?branch=master)](https://travis-ci.org/containerd/console)
+
+Golang package for dealing with consoles.  Light on deps and a simple API.
+
+## Modifying the current process
+
+```go
+current := console.Current()
+defer current.Reset()
+
+if err := current.SetRaw(); err != nil {
+}
+ws, err := current.Size()
+current.Resize(ws)
+```
diff --git a/vendor/github.com/containerd/console/console.go b/vendor/github.com/containerd/console/console.go
new file mode 100644
index 0000000..bf2798f
--- /dev/null
+++ b/vendor/github.com/containerd/console/console.go
@@ -0,0 +1,62 @@
+package console
+
+import (
+	"errors"
+	"io"
+	"os"
+)
+
+var ErrNotAConsole = errors.New("provided file is not a console")
+
+type Console interface {
+	io.Reader
+	io.Writer
+	io.Closer
+
+	// Resize resizes the console to the provided window size
+	Resize(WinSize) error
+	// ResizeFrom resizes the calling console to the size of the
+	// provided console
+	ResizeFrom(Console) error
+	// SetRaw sets the console in raw mode
+	SetRaw() error
+	// DisableEcho disables echo on the console
+	DisableEcho() error
+	// Reset restores the console to its original state
+	Reset() error
+	// Size returns the window size of the console
+	Size() (WinSize, error)
+	// Fd returns the console's file descriptor
+	Fd() uintptr
+	// Name returns the console's file name
+	Name() string
+}
+
+// WinSize specifies the window size of the console
+type WinSize struct {
+	// Height of the console
+	Height uint16
+	// Width of the console
+	Width uint16
+	x     uint16
+	y     uint16
+}
+
+// Current returns the current process's console
+func Current() Console {
+	c, err := ConsoleFromFile(os.Stdin)
+	if err != nil {
+		// stdin should always be a console for the design
+		// of this function
+		panic(err)
+	}
+	return c
+}
+
+// ConsoleFromFile returns a console using the provided file
+func ConsoleFromFile(f *os.File) (Console, error) {
+	if err := checkConsole(f); err != nil {
+		return nil, err
+	}
+	return newMaster(f)
+}
diff --git a/vendor/github.com/containerd/console/console_linux.go b/vendor/github.com/containerd/console/console_linux.go
new file mode 100644
index 0000000..c963729
--- /dev/null
+++ b/vendor/github.com/containerd/console/console_linux.go
@@ -0,0 +1,255 @@
+// +build linux
+
+package console
+
+import (
+	"io"
+	"os"
+	"sync"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	maxEvents = 128
+)
+
+// Epoller manages multiple epoll consoles using edge-triggered epoll api so we
+// don't have to deal with repeated wake-up of EPOLLERR or EPOLLHUP.
+// For more details, see:
+// - https://github.com/systemd/systemd/pull/4262
+// - https://github.com/moby/moby/issues/27202
+//
+// Example usage of Epoller and EpollConsole can be as follow:
+//
+//	epoller, _ := NewEpoller()
+//	epollConsole, _ := epoller.Add(console)
+//	go epoller.Wait()
+//	var (
+//		b  bytes.Buffer
+//		wg sync.WaitGroup
+//	)
+//	wg.Add(1)
+//	go func() {
+//		io.Copy(&b, epollConsole)
+//		wg.Done()
+//	}()
+//	// perform I/O on the console
+//	epollConsole.Shutdown(epoller.CloseConsole)
+//	wg.Wait()
+//	epollConsole.Close()
+type Epoller struct {
+	efd       int
+	mu        sync.Mutex
+	fdMapping map[int]*EpollConsole
+}
+
+// NewEpoller returns an instance of epoller with a valid epoll fd.
+func NewEpoller() (*Epoller, error) {
+	efd, err := unix.EpollCreate1(unix.EPOLL_CLOEXEC)
+	if err != nil {
+		return nil, err
+	}
+	return &Epoller{
+		efd:       efd,
+		fdMapping: make(map[int]*EpollConsole),
+	}, nil
+}
+
+// Add creates an epoll console based on the provided console. The console will
+// be registered with EPOLLET (i.e. using edge-triggered notification) and its
+// file descriptor will be set to non-blocking mode. After this, user should use
+// the return console to perform I/O.
+func (e *Epoller) Add(console Console) (*EpollConsole, error) {
+	sysfd := int(console.Fd())
+	// Set sysfd to non-blocking mode
+	if err := unix.SetNonblock(sysfd, true); err != nil {
+		return nil, err
+	}
+
+	ev := unix.EpollEvent{
+		Events: unix.EPOLLIN | unix.EPOLLOUT | unix.EPOLLRDHUP | unix.EPOLLET,
+		Fd:     int32(sysfd),
+	}
+	if err := unix.EpollCtl(e.efd, unix.EPOLL_CTL_ADD, sysfd, &ev); err != nil {
+		return nil, err
+	}
+	ef := &EpollConsole{
+		Console: console,
+		sysfd:   sysfd,
+		readc:   sync.NewCond(&sync.Mutex{}),
+		writec:  sync.NewCond(&sync.Mutex{}),
+	}
+	e.mu.Lock()
+	e.fdMapping[sysfd] = ef
+	e.mu.Unlock()
+	return ef, nil
+}
+
+// Wait starts the loop to wait for its consoles' notifications and signal
+// appropriate console that it can perform I/O.
+func (e *Epoller) Wait() error {
+	events := make([]unix.EpollEvent, maxEvents)
+	for {
+		n, err := unix.EpollWait(e.efd, events, -1)
+		if err != nil {
+			// EINTR: The call was interrupted by a signal handler before either
+			// any of the requested events occurred or the timeout expired
+			if err == unix.EINTR {
+				continue
+			}
+			return err
+		}
+		for i := 0; i < n; i++ {
+			ev := &events[i]
+			// the console is ready to be read from
+			if ev.Events&(unix.EPOLLIN|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
+				if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
+					epfile.signalRead()
+				}
+			}
+			// the console is ready to be written to
+			if ev.Events&(unix.EPOLLOUT|unix.EPOLLHUP|unix.EPOLLERR) != 0 {
+				if epfile := e.getConsole(int(ev.Fd)); epfile != nil {
+					epfile.signalWrite()
+				}
+			}
+		}
+	}
+}
+
+// CloseConsole unregisters the console's file descriptor from the epoll interface
+func (e *Epoller) CloseConsole(fd int) error {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	delete(e.fdMapping, fd)
+	return unix.EpollCtl(e.efd, unix.EPOLL_CTL_DEL, fd, &unix.EpollEvent{})
+}
+
+func (e *Epoller) getConsole(sysfd int) *EpollConsole {
+	e.mu.Lock()
+	f := e.fdMapping[sysfd]
+	e.mu.Unlock()
+	return f
+}
+
+// Close closes the epoll fd
+func (e *Epoller) Close() error {
+	return unix.Close(e.efd)
+}
+
+// EpollConsole acts like a console but registers its file descriptor with an
+// epoll fd and uses epoll API to perform I/O.
+type EpollConsole struct {
+	Console
+	readc  *sync.Cond
+	writec *sync.Cond
+	sysfd  int
+	closed bool
+}
+
+// Read reads up to len(p) bytes into p. It returns the number of bytes read
+// (0 <= n <= len(p)) and any error encountered.
+//
+// If the console's read returns EAGAIN or EIO, we assume that it's a
+// temporary error because the other side went away and wait for the signal
+// generated by epoll event to continue.
+func (ec *EpollConsole) Read(p []byte) (n int, err error) {
+	var read int
+	ec.readc.L.Lock()
+	defer ec.readc.L.Unlock()
+	for {
+		read, err = ec.Console.Read(p[n:])
+		n += read
+		if err != nil {
+			var hangup bool
+			if perr, ok := err.(*os.PathError); ok {
+				hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
+			} else {
+				hangup = (err == unix.EAGAIN || err == unix.EIO)
+			}
+			// if the other end disappears, assume this is temporary and wait for the
+			// signal to continue again. Unless we didn't read anything and the
+			// console is already marked as closed then we should exit
+			if hangup && !(n == 0 && len(p) > 0 && ec.closed) {
+				ec.readc.Wait()
+				continue
+			}
+		}
+		break
+	}
+	// if we didn't read anything then return io.EOF to end gracefully
+	if n == 0 && len(p) > 0 && err == nil {
+		err = io.EOF
+	}
+	// signal for others that we finished the read
+	ec.readc.Signal()
+	return n, err
+}
+
+// Writes len(p) bytes from p to the console. It returns the number of bytes
+// written from p (0 <= n <= len(p)) and any error encountered that caused
+// the write to stop early.
+//
+// If writes to the console return EAGAIN or EIO, we assume that it's a
+// temporary error because the other side went away and wait for the signal
+// generated by epoll event to continue.
+func (ec *EpollConsole) Write(p []byte) (n int, err error) {
+	var written int
+	ec.writec.L.Lock()
+	defer ec.writec.L.Unlock()
+	for {
+		written, err = ec.Console.Write(p[n:])
+		n += written
+		if err != nil {
+			var hangup bool
+			if perr, ok := err.(*os.PathError); ok {
+				hangup = (perr.Err == unix.EAGAIN || perr.Err == unix.EIO)
+			} else {
+				hangup = (err == unix.EAGAIN || err == unix.EIO)
+			}
+			// if the other end disappears, assume this is temporary and wait for the
+			// signal to continue again.
+			if hangup {
+				ec.writec.Wait()
+				continue
+			}
+		}
+		// unrecoverable error, break the loop and return the error
+		break
+	}
+	if n < len(p) && err == nil {
+		err = io.ErrShortWrite
+	}
+	// signal for others that we finished the write
+	ec.writec.Signal()
+	return n, err
+}
+
+// Shutdown closes the file descriptor and signals call waiters for this fd.
+// It accepts a callback which will be called with the console's fd. The
+// callback typically will be used to do further cleanup such as unregister the
+// console's fd from the epoll interface.
+// User should call Shutdown and wait for all I/O operation to be finished
+// before closing the console.
+func (ec *EpollConsole) Shutdown(close func(int) error) error {
+	ec.readc.L.Lock()
+	defer ec.readc.L.Unlock()
+	ec.writec.L.Lock()
+	defer ec.writec.L.Unlock()
+
+	ec.readc.Broadcast()
+	ec.writec.Broadcast()
+	ec.closed = true
+	return close(ec.sysfd)
+}
+
+// signalRead signals that the console is readable.
+func (ec *EpollConsole) signalRead() {
+	ec.readc.Signal()
+}
+
+// signalWrite signals that the console is writable.
+func (ec *EpollConsole) signalWrite() {
+	ec.writec.Signal()
+}
diff --git a/vendor/github.com/containerd/console/console_unix.go b/vendor/github.com/containerd/console/console_unix.go
new file mode 100644
index 0000000..118c8c3
--- /dev/null
+++ b/vendor/github.com/containerd/console/console_unix.go
@@ -0,0 +1,142 @@
+// +build darwin freebsd linux solaris
+
+package console
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// NewPty creates a new pty pair
+// The master is returned as the first console and a string
+// with the path to the pty slave is returned as the second
+func NewPty() (Console, string, error) {
+	f, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0)
+	if err != nil {
+		return nil, "", err
+	}
+	slave, err := ptsname(f)
+	if err != nil {
+		return nil, "", err
+	}
+	if err := unlockpt(f); err != nil {
+		return nil, "", err
+	}
+	m, err := newMaster(f)
+	if err != nil {
+		return nil, "", err
+	}
+	return m, slave, nil
+}
+
+type master struct {
+	f        *os.File
+	original *unix.Termios
+}
+
+func (m *master) Read(b []byte) (int, error) {
+	return m.f.Read(b)
+}
+
+func (m *master) Write(b []byte) (int, error) {
+	return m.f.Write(b)
+}
+
+func (m *master) Close() error {
+	return m.f.Close()
+}
+
+func (m *master) Resize(ws WinSize) error {
+	return tcswinsz(m.f.Fd(), ws)
+}
+
+func (m *master) ResizeFrom(c Console) error {
+	ws, err := c.Size()
+	if err != nil {
+		return err
+	}
+	return m.Resize(ws)
+}
+
+func (m *master) Reset() error {
+	if m.original == nil {
+		return nil
+	}
+	return tcset(m.f.Fd(), m.original)
+}
+
+func (m *master) getCurrent() (unix.Termios, error) {
+	var termios unix.Termios
+	if err := tcget(m.f.Fd(), &termios); err != nil {
+		return unix.Termios{}, err
+	}
+	return termios, nil
+}
+
+func (m *master) SetRaw() error {
+	rawState, err := m.getCurrent()
+	if err != nil {
+		return err
+	}
+	rawState = cfmakeraw(rawState)
+	rawState.Oflag = rawState.Oflag | unix.OPOST
+	return tcset(m.f.Fd(), &rawState)
+}
+
+func (m *master) DisableEcho() error {
+	rawState, err := m.getCurrent()
+	if err != nil {
+		return err
+	}
+	rawState.Lflag = rawState.Lflag &^ unix.ECHO
+	return tcset(m.f.Fd(), &rawState)
+}
+
+func (m *master) Size() (WinSize, error) {
+	return tcgwinsz(m.f.Fd())
+}
+
+func (m *master) Fd() uintptr {
+	return m.f.Fd()
+}
+
+func (m *master) Name() string {
+	return m.f.Name()
+}
+
+// checkConsole checks if the provided file is a console
+func checkConsole(f *os.File) error {
+	var termios unix.Termios
+	if tcget(f.Fd(), &termios) != nil {
+		return ErrNotAConsole
+	}
+	return nil
+}
+
+func newMaster(f *os.File) (Console, error) {
+	m := &master{
+		f: f,
+	}
+	t, err := m.getCurrent()
+	if err != nil {
+		return nil, err
+	}
+	m.original = &t
+	return m, nil
+}
+
+// ClearONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
+// created by us acts normally. In particular, a not-very-well-known default of
+// Linux unix98 ptys is that they have +onlcr by default. While this isn't a
+// problem for terminal emulators, because we relay data from the terminal we
+// also relay that funky line discipline.
+func ClearONLCR(fd uintptr) error {
+	return setONLCR(fd, false)
+}
+
+// SetONLCR sets the necessary tty_ioctl(4)s to ensure that a pty pair
+// created by us acts as intended for a terminal emulator.
+func SetONLCR(fd uintptr) error {
+	return setONLCR(fd, true)
+}
diff --git a/vendor/github.com/containerd/console/console_windows.go b/vendor/github.com/containerd/console/console_windows.go
new file mode 100644
index 0000000..d78a0b8
--- /dev/null
+++ b/vendor/github.com/containerd/console/console_windows.go
@@ -0,0 +1,200 @@
+package console
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/windows"
+)
+
+var (
+	vtInputSupported  bool
+	ErrNotImplemented = errors.New("not implemented")
+)
+
+func (m *master) initStdios() {
+	m.in = windows.Handle(os.Stdin.Fd())
+	if err := windows.GetConsoleMode(m.in, &m.inMode); err == nil {
+		// Validate that windows.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
+		if err = windows.SetConsoleMode(m.in, m.inMode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err == nil {
+			vtInputSupported = true
+		}
+		// Unconditionally set the console mode back even on failure because SetConsoleMode
+		// remembers invalid bits on input handles.
+		windows.SetConsoleMode(m.in, m.inMode)
+	} else {
+		fmt.Printf("failed to get console mode for stdin: %v\n", err)
+	}
+
+	m.out = windows.Handle(os.Stdout.Fd())
+	if err := windows.GetConsoleMode(m.out, &m.outMode); err == nil {
+		if err := windows.SetConsoleMode(m.out, m.outMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
+			m.outMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
+		} else {
+			windows.SetConsoleMode(m.out, m.outMode)
+		}
+	} else {
+		fmt.Printf("failed to get console mode for stdout: %v\n", err)
+	}
+
+	m.err = windows.Handle(os.Stderr.Fd())
+	if err := windows.GetConsoleMode(m.err, &m.errMode); err == nil {
+		if err := windows.SetConsoleMode(m.err, m.errMode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING); err == nil {
+			m.errMode |= windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
+		} else {
+			windows.SetConsoleMode(m.err, m.errMode)
+		}
+	} else {
+		fmt.Printf("failed to get console mode for stderr: %v\n", err)
+	}
+}
+
+type master struct {
+	in     windows.Handle
+	inMode uint32
+
+	out     windows.Handle
+	outMode uint32
+
+	err     windows.Handle
+	errMode uint32
+}
+
+func (m *master) SetRaw() error {
+	if err := makeInputRaw(m.in, m.inMode); err != nil {
+		return err
+	}
+
+	// Set StdOut and StdErr to raw mode, we ignore failures since
+	// windows.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this version of
+	// Windows.
+
+	windows.SetConsoleMode(m.out, m.outMode|windows.DISABLE_NEWLINE_AUTO_RETURN)
+
+	windows.SetConsoleMode(m.err, m.errMode|windows.DISABLE_NEWLINE_AUTO_RETURN)
+
+	return nil
+}
+
+func (m *master) Reset() error {
+	for _, s := range []struct {
+		fd   windows.Handle
+		mode uint32
+	}{
+		{m.in, m.inMode},
+		{m.out, m.outMode},
+		{m.err, m.errMode},
+	} {
+		if err := windows.SetConsoleMode(s.fd, s.mode); err != nil {
+			return errors.Wrap(err, "unable to restore console mode")
+		}
+	}
+
+	return nil
+}
+
+func (m *master) Size() (WinSize, error) {
+	var info windows.ConsoleScreenBufferInfo
+	err := windows.GetConsoleScreenBufferInfo(m.out, &info)
+	if err != nil {
+		return WinSize{}, errors.Wrap(err, "unable to get console info")
+	}
+
+	winsize := WinSize{
+		Width:  uint16(info.Window.Right - info.Window.Left + 1),
+		Height: uint16(info.Window.Bottom - info.Window.Top + 1),
+	}
+
+	return winsize, nil
+}
+
+func (m *master) Resize(ws WinSize) error {
+	return ErrNotImplemented
+}
+
+func (m *master) ResizeFrom(c Console) error {
+	return ErrNotImplemented
+}
+
+func (m *master) DisableEcho() error {
+	mode := m.inMode &^ windows.ENABLE_ECHO_INPUT
+	mode |= windows.ENABLE_PROCESSED_INPUT
+	mode |= windows.ENABLE_LINE_INPUT
+
+	if err := windows.SetConsoleMode(m.in, mode); err != nil {
+		return errors.Wrap(err, "unable to set console to disable echo")
+	}
+
+	return nil
+}
+
+func (m *master) Close() error {
+	return nil
+}
+
+func (m *master) Read(b []byte) (int, error) {
+	panic("not implemented on windows")
+}
+
+func (m *master) Write(b []byte) (int, error) {
+	panic("not implemented on windows")
+}
+
+func (m *master) Fd() uintptr {
+	return uintptr(m.in)
+}
+
+// on windows, console can only be made from os.Std{in,out,err}, hence there
+// isnt a single name here we can use. Return a dummy "console" value in this
+// case should be sufficient.
+func (m *master) Name() string {
+	return "console"
+}
+
+// makeInputRaw puts the terminal (Windows Console) connected to the given
+// file descriptor into raw mode
+func makeInputRaw(fd windows.Handle, mode uint32) error {
+	// See
+	// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+	// -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx
+
+	// Disable these modes
+	mode &^= windows.ENABLE_ECHO_INPUT
+	mode &^= windows.ENABLE_LINE_INPUT
+	mode &^= windows.ENABLE_MOUSE_INPUT
+	mode &^= windows.ENABLE_WINDOW_INPUT
+	mode &^= windows.ENABLE_PROCESSED_INPUT
+
+	// Enable these modes
+	mode |= windows.ENABLE_EXTENDED_FLAGS
+	mode |= windows.ENABLE_INSERT_MODE
+	mode |= windows.ENABLE_QUICK_EDIT_MODE
+
+	if vtInputSupported {
+		mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT
+	}
+
+	if err := windows.SetConsoleMode(fd, mode); err != nil {
+		return errors.Wrap(err, "unable to set console to raw mode")
+	}
+
+	return nil
+}
+
+func checkConsole(f *os.File) error {
+	var mode uint32
+	if err := windows.GetConsoleMode(windows.Handle(f.Fd()), &mode); err != nil {
+		return err
+	}
+	return nil
+}
+
+func newMaster(f *os.File) (Console, error) {
+	if f != os.Stdin && f != os.Stdout && f != os.Stderr {
+		return nil, errors.New("creating a console from a file is not supported on windows")
+	}
+	m := &master{}
+	m.initStdios()
+	return m, nil
+}
diff --git a/vendor/github.com/containerd/console/tc_darwin.go b/vendor/github.com/containerd/console/tc_darwin.go
new file mode 100644
index 0000000..b102bad
--- /dev/null
+++ b/vendor/github.com/containerd/console/tc_darwin.go
@@ -0,0 +1,37 @@
+package console
+
+import (
+	"fmt"
+	"os"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TIOCGETA
+	cmdTcSet = unix.TIOCSETA
+)
+
+func ioctl(fd, flag, data uintptr) error {
+	if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 {
+		return err
+	}
+	return nil
+}
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+	var u int32
+	return ioctl(f.Fd(), unix.TIOCPTYUNLK, uintptr(unsafe.Pointer(&u)))
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCPTYGNAME)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("/dev/pts/%d", n), nil
+}
diff --git a/vendor/github.com/containerd/console/tc_freebsd.go b/vendor/github.com/containerd/console/tc_freebsd.go
new file mode 100644
index 0000000..e2a10e4
--- /dev/null
+++ b/vendor/github.com/containerd/console/tc_freebsd.go
@@ -0,0 +1,29 @@
+package console
+
+import (
+	"fmt"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TIOCGETA
+	cmdTcSet = unix.TIOCSETA
+)
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+// This does not exist on FreeBSD; it does not allocate controlling terminals on open.
+func unlockpt(f *os.File) error {
+	return nil
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("/dev/pts/%d", n), nil
+}
diff --git a/vendor/github.com/containerd/console/tc_linux.go b/vendor/github.com/containerd/console/tc_linux.go
new file mode 100644
index 0000000..80ef2f6
--- /dev/null
+++ b/vendor/github.com/containerd/console/tc_linux.go
@@ -0,0 +1,37 @@
+package console
+
+import (
+	"fmt"
+	"os"
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TCGETS
+	cmdTcSet = unix.TCSETS
+)
+
+func ioctl(fd, flag, data uintptr) error {
+	if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 {
+		return err
+	}
+	return nil
+}
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+	var u int32
+	return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u)))
+}
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	n, err := unix.IoctlGetInt(int(f.Fd()), unix.TIOCGPTN)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("/dev/pts/%d", n), nil
+}
diff --git a/vendor/github.com/containerd/console/tc_solaris_cgo.go b/vendor/github.com/containerd/console/tc_solaris_cgo.go
new file mode 100644
index 0000000..f8066d8
--- /dev/null
+++ b/vendor/github.com/containerd/console/tc_solaris_cgo.go
@@ -0,0 +1,35 @@
+// +build solaris,cgo
+
+package console
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+//#include <stdlib.h>
+import "C"
+
+const (
+	cmdTcGet = unix.TCGETS
+	cmdTcSet = unix.TCSETS
+)
+
+// ptsname retrieves the name of the first available pts for the given master.
+func ptsname(f *os.File) (string, error) {
+	ptspath, err := C.ptsname(C.int(f.Fd()))
+	if err != nil {
+		return "", err
+	}
+	return C.GoString(ptspath), nil
+}
+
+// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
+// unlockpt should be called before opening the slave side of a pty.
+func unlockpt(f *os.File) error {
+	if _, err := C.grantpt(C.int(f.Fd())); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/console/tc_solaris_nocgo.go b/vendor/github.com/containerd/console/tc_solaris_nocgo.go
new file mode 100644
index 0000000..0aefa0d
--- /dev/null
+++ b/vendor/github.com/containerd/console/tc_solaris_nocgo.go
@@ -0,0 +1,31 @@
+// +build solaris,!cgo
+
+//
+// Implementing the functions below requires cgo support.  Non-cgo stub
+// versions are defined below to enable cross-compilation of source code
+// that depends on these functions, but the resultant cross-compiled
+// binaries cannot actually be used.  If the stub function(s) below are
+// actually invoked they will display an error message and cause the
+// calling process to exit.
+//
+
+package console
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	cmdTcGet = unix.TCGETS
+	cmdTcSet = unix.TCSETS
+)
+
+func ptsname(f *os.File) (string, error) {
+	panic("ptsname() support requires cgo.")
+}
+
+func unlockpt(f *os.File) error {
+	panic("unlockpt() support requires cgo.")
+}
diff --git a/vendor/github.com/containerd/console/tc_unix.go b/vendor/github.com/containerd/console/tc_unix.go
new file mode 100644
index 0000000..df7dcb9
--- /dev/null
+++ b/vendor/github.com/containerd/console/tc_unix.go
@@ -0,0 +1,75 @@
+// +build darwin freebsd linux solaris
+
+package console
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func tcget(fd uintptr, p *unix.Termios) error {
+	termios, err := unix.IoctlGetTermios(int(fd), cmdTcGet)
+	if err != nil {
+		return err
+	}
+	*p = *termios
+	return nil
+}
+
+func tcset(fd uintptr, p *unix.Termios) error {
+	return unix.IoctlSetTermios(int(fd), cmdTcSet, p)
+}
+
+func tcgwinsz(fd uintptr) (WinSize, error) {
+	var ws WinSize
+
+	uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ)
+	if err != nil {
+		return ws, err
+	}
+
+	// Translate from unix.Winsize to console.WinSize
+	ws.Height = uws.Row
+	ws.Width = uws.Col
+	ws.x = uws.Xpixel
+	ws.y = uws.Ypixel
+	return ws, nil
+}
+
+func tcswinsz(fd uintptr, ws WinSize) error {
+	// Translate from console.WinSize to unix.Winsize
+
+	var uws unix.Winsize
+	uws.Row = ws.Height
+	uws.Col = ws.Width
+	uws.Xpixel = ws.x
+	uws.Ypixel = ws.y
+
+	return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, &uws)
+}
+
+func setONLCR(fd uintptr, enable bool) error {
+	var termios unix.Termios
+	if err := tcget(fd, &termios); err != nil {
+		return err
+	}
+	if enable {
+		// Set +onlcr so we can act like a real terminal
+		termios.Oflag |= unix.ONLCR
+	} else {
+		// Set -onlcr so we don't have to deal with \r.
+		termios.Oflag &^= unix.ONLCR
+	}
+	return tcset(fd, &termios)
+}
+
+func cfmakeraw(t unix.Termios) unix.Termios {
+	t.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
+	t.Oflag &^= unix.OPOST
+	t.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
+	t.Cflag &^= (unix.CSIZE | unix.PARENB)
+	t.Cflag &^= unix.CS8
+	t.Cc[unix.VMIN] = 1
+	t.Cc[unix.VTIME] = 0
+
+	return t
+}
diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md
index 6741966..28635db 100644
--- a/vendor/github.com/containerd/containerd/README.md
+++ b/vendor/github.com/containerd/containerd/README.md
@@ -1,70 +1,206 @@
-# containerd
+![banner](/docs/images/containerd-dark.png?raw=true)
 
-containerd is a daemon to control runC, built for performance and density. 
-containerd leverages runC's advanced features such as seccomp and user namespace support as well
-as checkpoint and restore for cloning and live migration of containers.
+[![GoDoc](https://godoc.org/github.com/containerd/containerd?status.svg)](https://godoc.org/github.com/containerd/containerd)
+[![Build Status](https://travis-ci.org/containerd/containerd.svg?branch=master)](https://travis-ci.org/containerd/containerd)
+[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield)
+[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd)
 
-## Getting started
+containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc.
 
-The easiest way to start using containerd is to download binaries from the [releases page](https://github.com/containerd/containerd/releases).
+containerd is designed to be embedded into a larger system, rather than being used directly by developers or end-users.
 
-The included `ctr` command-line tool allows you interact with the containerd daemon:
+![architecture](design/architecture.png)
+
+## Getting Started
+
+If you are interested in trying out containerd please see our [Getting Started Guide](docs/getting-started.md).
+
+## Features
+
+### Client
+
+containerd offers a full client package to help you integrate containerd into your platform.
+
+```go
+
+import "github.com/containerd/containerd"
+
+func main() {
+	client, err := containerd.New("/run/containerd/containerd.sock")
+	defer client.Close()
+}
 
 ```
-$ sudo ctr containers start redis /containers/redis
-$ sudo ctr containers list
-ID                  PATH                STATUS              PROCESSES
-redis               /containers/redis   running             14063
+
+### Namespaces
+
+Namespaces allow multiple consumers to use the same containerd without conflicting with each other.  It has the benefit of sharing content but still having separation with containers and images.
+
+To set a namespace for requests to the API:
+
+```go
+context    = context.Background()
+// create a context for docker
+docker = namespaces.WithNamespace(context, "docker")
+
+containerd, err := client.NewContainer(docker, "id")
 ```
 
-`/containers/redis` is the path to an OCI bundle. [See the docs for more information.](docs/bundle.md)
+To set a default namespace on the client:
 
-## Docs
-
- * [Client CLI reference (`ctr`)](docs/cli.md)
- * [Daemon CLI reference (`containerd`)](docs/daemon.md)
- * [Creating OCI bundles](docs/bundle.md)
- * [containerd changes to the bundle](docs/bundle-changes.md)
- * [Attaching to STDIO or TTY](docs/attach.md)
- * [Telemetry and metrics](docs/telemetry.md)
-
-All documentation is contained in the `/docs` directory in this repository.
-
-## Building
-
-You will need to make sure that you have Go installed on your system and the containerd repository is cloned
-in your `$GOPATH`.  You will also need to make sure that you have all the dependencies cloned as well.
-Currently, contributing to containerd is not for the first time devs as many dependencies are not vendored and 
-work is being completed at a high rate.  
-
-After that just run `make` and the binaries for the daemon and client will be localed in the `bin/` directory.
-
-## Performance
-
-Starting 1000 containers concurrently runs at 126-140 containers per second.
-
-Overall start times:
-
-```
-[containerd] 2015/12/04 15:00:54   count:        1000
-[containerd] 2015/12/04 14:59:54   min:          23ms
-[containerd] 2015/12/04 14:59:54   max:         355ms
-[containerd] 2015/12/04 14:59:54   mean:         78ms
-[containerd] 2015/12/04 14:59:54   stddev:       34ms
-[containerd] 2015/12/04 14:59:54   median:       73ms
-[containerd] 2015/12/04 14:59:54   75%:          91ms
-[containerd] 2015/12/04 14:59:54   95%:         123ms
-[containerd] 2015/12/04 14:59:54   99%:         287ms
-[containerd] 2015/12/04 14:59:54   99.9%:       355ms
+```go
+client, err := containerd.New(address, containerd.WithDefaultNamespace("docker"))
 ```
 
-## Roadmap
+### Distribution
 
-The current roadmap and milestones for alpha and beta completion are in the github issues on this repository.  Please refer to these issues for what is being worked on and completed for the various stages of development.
+```go
+// pull an image
+image, err := client.Pull(context, "docker.io/library/redis:latest")
+
+// push an image
+err := client.Push(context, "docker.io/library/redis:latest", image.Target())
+```
+
+### Containers
+
+In containerd, a container is a metadata object.  Resources such as an OCI runtime specification, image, root filesystem, and other metadata can be attached to a container.
+
+```go
+redis, err := client.NewContainer(context, "redis-master")
+defer redis.Delete(context)
+```
+
+### OCI Runtime Specification
+
+containerd fully supports the OCI runtime specification for running containers.  We have built in functions to help you generate runtime specifications based on images as well as custom parameters.
+
+You can specify options when creating a container about how to modify the specification.
+
+```go
+redis, err := client.NewContainer(context, "redis-master", containerd.WithNewSpec(containerd.WithImageConfig(image)))
+```
+
+### Root Filesystems
+
+containerd allows you to use overlay or snapshot filesystems with your containers.  It comes with builtin support for overlayfs and btrfs.
+
+```go
+// pull an image and unpack it into the configured snapshotter
+image, err := client.Pull(context, "docker.io/library/redis:latest", containerd.WithPullUnpack)
+
+// allocate a new RW root filesystem for a container based on the image
+redis, err := client.NewContainer(context, "redis-master",
+	containerd.WithNewSnapshot("redis-rootfs", image),
+	containerd.WithNewSpec(containerd.WithImageConfig(image)),
+
+)
+
+// use a readonly filesystem with multiple containers
+for i := 0; i < 10; i++ {
+	id := fmt.Sprintf("id-%d", i)
+	container, err := client.NewContainer(ctx, id,
+		containerd.WithNewSnapshotView(id, image),
+		containerd.WithNewSpec(containerd.WithImageConfig(image)),
+	)
+}
+```
+
+### Tasks
+
+Taking a container object and turning it into a runnable process on a system is done by creating a new `Task` from the container.  A task represents the runnable object within containerd.
+
+```go
+// create a new task
+task, err := redis.NewTask(context, containerd.Stdio)
+defer task.Delete(context)
+
+// the task is now running and has a pid that can be used to set up networking
+// or other runtime settings outside of containerd
+pid := task.Pid()
+
+// start the redis-server process inside the container
+err := task.Start(context)
+
+// wait for the task to exit and get the exit status
+status, err := task.Wait(context)
+```
+
+### Checkpoint and Restore
+
+If you have [criu](https://criu.org/Main_Page) installed on your machine you can checkpoint and restore containers and their tasks.  This allows you to clone and/or live migrate containers to other machines.
+
+```go
+// checkpoint the task then push it to a registry
+checkpoint, err := task.Checkpoint(context, containerd.WithExit)
+
+err := client.Push(context, "myregistry/checkpoints/redis:master", checkpoint)
+
+// on a new machine pull the checkpoint and restore the redis container
+image, err := client.Pull(context, "myregistry/checkpoints/redis:master")
+
+checkpoint := image.Target()
+
+redis, err = client.NewContainer(context, "redis-master", containerd.WithCheckpoint(checkpoint, "redis-rootfs"))
+defer container.Delete(context)
+
+task, err = redis.NewTask(context, containerd.Stdio, containerd.WithTaskCheckpoint(checkpoint))
+defer task.Delete(context)
+
+err := task.Start(context)
+```
+
+## Developer Quick-Start
+
+To build the daemon and `ctr` simple test client, the following build system dependencies are required:
+
+* Go 1.9.x or above
+* Protoc 3.x compiler and headers (download at the [Google protobuf releases page](https://github.com/google/protobuf/releases))
+* Btrfs headers and libraries for your distribution. Note that building the btrfs driver can be disabled via build tag removing this dependency.
+
+For proper results, install the `protoc` release into `/usr/local` on your build system. For example, the following commands will download and install the 3.1.0 release for a 64-bit Linux host:
+
+```
+$ wget -c https://github.com/google/protobuf/releases/download/v3.1.0/protoc-3.1.0-linux-x86_64.zip
+$ sudo unzip protoc-3.1.0-linux-x86_64.zip -d /usr/local
+```
+
+With the required dependencies installed, the `Makefile` target named **binaries** will compile the `ctr` and `containerd` binaries and place them in the `bin/` directory. Using `sudo make install` will place the binaries in `/usr/local/bin`. When making any changes to the gRPC API, `make generate` will use the installed `protoc` compiler to regenerate the API generated code packages.
+
+> *Note*: A build tag is currently available to disable building the btrfs snapshot driver.
+> Adding `BUILDTAGS=no_btrfs` to your environment before calling the **binaries**
+> Makefile target will disable the btrfs driver within the containerd Go build.
+
+Vendoring of external imports uses the [`vndr` tool](https://github.com/LK4D4/vndr) which uses a simple config file, `vendor.conf`, to provide the URL and version or hash details for each vendored import. After modifying `vendor.conf` run the `vndr` tool to update the `vendor/` directory contents. Combining the `vendor.conf` update with the changeset in `vendor/` after running `vndr` should become a single commit for a PR which relies on vendored updates.
+
+Please refer to [RUNC.md](/RUNC.md) for the currently supported version of `runc` that is used by containerd.
+
+### Releases and API Stability
+
+Please see [RELEASES.md](RELEASES.md) for details on versioning and stability
+of containerd components.
+
+### Development reports
+
+A weekly summary of progress and current work is published at
+https://github.com/containerd/containerd/tree/master/reports
+
+### Communication
+
+For async communication and long running discussions please use issues and pull requests on the github repo.
+This will be the best place to discuss design and implementation.
+
+For sync communication we have a community slack with a #containerd channel that everyone is welcome to join and chat about development.
+
+**Slack:** https://dockr.ly/community
+
+### Reporting security issues
+
+__If you are reporting a security issue, please reach out discreetly at security@containerd.io__.
 
 ## Copyright and license
 
-Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code
+Copyright ©2016-2017 Docker, Inc. All rights reserved, except as follows. Code
 is released under the Apache 2.0 license. The README.md file, and files in the
 "docs" folder are licensed under the Creative Commons Attribution 4.0
 International License under the terms and conditions set forth in the file
diff --git a/vendor/github.com/containerd/containerd/api/README.md b/vendor/github.com/containerd/containerd/api/README.md
new file mode 100644
index 0000000..f6eb28c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/README.md
@@ -0,0 +1,18 @@
+This directory contains the GRPC API definitions for containerd.
+
+All defined services and messages have been aggregated into `*.pb.txt`
+descriptors files in this directory. Definitions present here are considered
+frozen after the release.
+
+At release time, the current `next.pb.txt` file will be moved into place to
+freeze the API changes for the minor version. For example, when 1.0.0 is
+released, `next.pb.txt` should be moved to `1.0.txt`. Notice that we leave off
+the patch number, since the API will be completely locked down for a given
+patch series.
+
+We may find that by default, protobuf descriptors are too noisy to lock down
+API changes. In that case, we may filter out certain fields in the descriptors,
+possibly regenerating for old versions.
+
+This process is similar to the [process used to ensure backwards compatibility
+in Go](https://github.com/golang/go/tree/master/api).
diff --git a/vendor/github.com/containerd/containerd/api/grpc/types/api.pb.go b/vendor/github.com/containerd/containerd/api/grpc/types/api.pb.go
deleted file mode 100644
index 8f14a18..0000000
--- a/vendor/github.com/containerd/containerd/api/grpc/types/api.pb.go
+++ /dev/null
@@ -1,2601 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: api.proto
-
-/*
-Package types is a generated protocol buffer package.
-
-It is generated from these files:
-	api.proto
-
-It has these top-level messages:
-	GetServerVersionRequest
-	GetServerVersionResponse
-	UpdateProcessRequest
-	UpdateProcessResponse
-	CreateContainerRequest
-	CreateContainerResponse
-	SignalRequest
-	SignalResponse
-	AddProcessRequest
-	Rlimit
-	User
-	AddProcessResponse
-	CreateCheckpointRequest
-	CreateCheckpointResponse
-	DeleteCheckpointRequest
-	DeleteCheckpointResponse
-	ListCheckpointRequest
-	Checkpoint
-	ListCheckpointResponse
-	StateRequest
-	ContainerState
-	Process
-	Container
-	Machine
-	StateResponse
-	UpdateContainerRequest
-	UpdateResource
-	BlockIODevice
-	WeightDevice
-	ThrottleDevice
-	UpdateContainerResponse
-	EventsRequest
-	Event
-	NetworkStats
-	CpuUsage
-	ThrottlingData
-	CpuStats
-	PidsStats
-	MemoryData
-	MemoryStats
-	BlkioStatsEntry
-	BlkioStats
-	HugetlbStats
-	CgroupStats
-	StatsResponse
-	StatsRequest
-*/
-package types
-
-import proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import google_protobuf "github.com/golang/protobuf/ptypes/timestamp"
-
-import (
-	context "golang.org/x/net/context"
-	grpc "google.golang.org/grpc"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-
-type GetServerVersionRequest struct {
-}
-
-func (m *GetServerVersionRequest) Reset()                    { *m = GetServerVersionRequest{} }
-func (m *GetServerVersionRequest) String() string            { return proto.CompactTextString(m) }
-func (*GetServerVersionRequest) ProtoMessage()               {}
-func (*GetServerVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
-
-type GetServerVersionResponse struct {
-	Major    uint32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
-	Minor    uint32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
-	Patch    uint32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
-	Revision string `protobuf:"bytes,4,opt,name=revision" json:"revision,omitempty"`
-}
-
-func (m *GetServerVersionResponse) Reset()                    { *m = GetServerVersionResponse{} }
-func (m *GetServerVersionResponse) String() string            { return proto.CompactTextString(m) }
-func (*GetServerVersionResponse) ProtoMessage()               {}
-func (*GetServerVersionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
-
-func (m *GetServerVersionResponse) GetMajor() uint32 {
-	if m != nil {
-		return m.Major
-	}
-	return 0
-}
-
-func (m *GetServerVersionResponse) GetMinor() uint32 {
-	if m != nil {
-		return m.Minor
-	}
-	return 0
-}
-
-func (m *GetServerVersionResponse) GetPatch() uint32 {
-	if m != nil {
-		return m.Patch
-	}
-	return 0
-}
-
-func (m *GetServerVersionResponse) GetRevision() string {
-	if m != nil {
-		return m.Revision
-	}
-	return ""
-}
-
-type UpdateProcessRequest struct {
-	Id         string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Pid        string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"`
-	CloseStdin bool   `protobuf:"varint,3,opt,name=closeStdin" json:"closeStdin,omitempty"`
-	Width      uint32 `protobuf:"varint,4,opt,name=width" json:"width,omitempty"`
-	Height     uint32 `protobuf:"varint,5,opt,name=height" json:"height,omitempty"`
-}
-
-func (m *UpdateProcessRequest) Reset()                    { *m = UpdateProcessRequest{} }
-func (m *UpdateProcessRequest) String() string            { return proto.CompactTextString(m) }
-func (*UpdateProcessRequest) ProtoMessage()               {}
-func (*UpdateProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
-
-func (m *UpdateProcessRequest) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *UpdateProcessRequest) GetPid() string {
-	if m != nil {
-		return m.Pid
-	}
-	return ""
-}
-
-func (m *UpdateProcessRequest) GetCloseStdin() bool {
-	if m != nil {
-		return m.CloseStdin
-	}
-	return false
-}
-
-func (m *UpdateProcessRequest) GetWidth() uint32 {
-	if m != nil {
-		return m.Width
-	}
-	return 0
-}
-
-func (m *UpdateProcessRequest) GetHeight() uint32 {
-	if m != nil {
-		return m.Height
-	}
-	return 0
-}
-
-type UpdateProcessResponse struct {
-}
-
-func (m *UpdateProcessResponse) Reset()                    { *m = UpdateProcessResponse{} }
-func (m *UpdateProcessResponse) String() string            { return proto.CompactTextString(m) }
-func (*UpdateProcessResponse) ProtoMessage()               {}
-func (*UpdateProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
-
-type CreateContainerRequest struct {
-	Id            string   `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	BundlePath    string   `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"`
-	Checkpoint    string   `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"`
-	Stdin         string   `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"`
-	Stdout        string   `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"`
-	Stderr        string   `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"`
-	Labels        []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"`
-	NoPivotRoot   bool     `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"`
-	Runtime       string   `protobuf:"bytes,9,opt,name=runtime" json:"runtime,omitempty"`
-	RuntimeArgs   []string `protobuf:"bytes,10,rep,name=runtimeArgs" json:"runtimeArgs,omitempty"`
-	CheckpointDir string   `protobuf:"bytes,11,opt,name=checkpointDir" json:"checkpointDir,omitempty"`
-}
-
-func (m *CreateContainerRequest) Reset()                    { *m = CreateContainerRequest{} }
-func (m *CreateContainerRequest) String() string            { return proto.CompactTextString(m) }
-func (*CreateContainerRequest) ProtoMessage()               {}
-func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
-
-func (m *CreateContainerRequest) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *CreateContainerRequest) GetBundlePath() string {
-	if m != nil {
-		return m.BundlePath
-	}
-	return ""
-}
-
-func (m *CreateContainerRequest) GetCheckpoint() string {
-	if m != nil {
-		return m.Checkpoint
-	}
-	return ""
-}
-
-func (m *CreateContainerRequest) GetStdin() string {
-	if m != nil {
-		return m.Stdin
-	}
-	return ""
-}
-
-func (m *CreateContainerRequest) GetStdout() string {
-	if m != nil {
-		return m.Stdout
-	}
-	return ""
-}
-
-func (m *CreateContainerRequest) GetStderr() string {
-	if m != nil {
-		return m.Stderr
-	}
-	return ""
-}
-
-func (m *CreateContainerRequest) GetLabels() []string {
-	if m != nil {
-		return m.Labels
-	}
-	return nil
-}
-
-func (m *CreateContainerRequest) GetNoPivotRoot() bool {
-	if m != nil {
-		return m.NoPivotRoot
-	}
-	return false
-}
-
-func (m *CreateContainerRequest) GetRuntime() string {
-	if m != nil {
-		return m.Runtime
-	}
-	return ""
-}
-
-func (m *CreateContainerRequest) GetRuntimeArgs() []string {
-	if m != nil {
-		return m.RuntimeArgs
-	}
-	return nil
-}
-
-func (m *CreateContainerRequest) GetCheckpointDir() string {
-	if m != nil {
-		return m.CheckpointDir
-	}
-	return ""
-}
-
-type CreateContainerResponse struct {
-	Container *Container `protobuf:"bytes,1,opt,name=container" json:"container,omitempty"`
-}
-
-func (m *CreateContainerResponse) Reset()                    { *m = CreateContainerResponse{} }
-func (m *CreateContainerResponse) String() string            { return proto.CompactTextString(m) }
-func (*CreateContainerResponse) ProtoMessage()               {}
-func (*CreateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
-
-func (m *CreateContainerResponse) GetContainer() *Container {
-	if m != nil {
-		return m.Container
-	}
-	return nil
-}
-
-type SignalRequest struct {
-	Id     string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Pid    string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"`
-	Signal uint32 `protobuf:"varint,3,opt,name=signal" json:"signal,omitempty"`
-}
-
-func (m *SignalRequest) Reset()                    { *m = SignalRequest{} }
-func (m *SignalRequest) String() string            { return proto.CompactTextString(m) }
-func (*SignalRequest) ProtoMessage()               {}
-func (*SignalRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
-
-func (m *SignalRequest) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *SignalRequest) GetPid() string {
-	if m != nil {
-		return m.Pid
-	}
-	return ""
-}
-
-func (m *SignalRequest) GetSignal() uint32 {
-	if m != nil {
-		return m.Signal
-	}
-	return 0
-}
-
-type SignalResponse struct {
-}
-
-func (m *SignalResponse) Reset()                    { *m = SignalResponse{} }
-func (m *SignalResponse) String() string            { return proto.CompactTextString(m) }
-func (*SignalResponse) ProtoMessage()               {}
-func (*SignalResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
-
-type AddProcessRequest struct {
-	Id              string    `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Terminal        bool      `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"`
-	User            *User     `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"`
-	Args            []string  `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"`
-	Env             []string  `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"`
-	Cwd             string    `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"`
-	Pid             string    `protobuf:"bytes,7,opt,name=pid" json:"pid,omitempty"`
-	Stdin           string    `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"`
-	Stdout          string    `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"`
-	Stderr          string    `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"`
-	Capabilities    []string  `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"`
-	ApparmorProfile string    `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"`
-	SelinuxLabel    string    `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"`
-	NoNewPrivileges bool      `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"`
-	Rlimits         []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"`
-}
-
-func (m *AddProcessRequest) Reset()                    { *m = AddProcessRequest{} }
-func (m *AddProcessRequest) String() string            { return proto.CompactTextString(m) }
-func (*AddProcessRequest) ProtoMessage()               {}
-func (*AddProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
-
-func (m *AddProcessRequest) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *AddProcessRequest) GetTerminal() bool {
-	if m != nil {
-		return m.Terminal
-	}
-	return false
-}
-
-func (m *AddProcessRequest) GetUser() *User {
-	if m != nil {
-		return m.User
-	}
-	return nil
-}
-
-func (m *AddProcessRequest) GetArgs() []string {
-	if m != nil {
-		return m.Args
-	}
-	return nil
-}
-
-func (m *AddProcessRequest) GetEnv() []string {
-	if m != nil {
-		return m.Env
-	}
-	return nil
-}
-
-func (m *AddProcessRequest) GetCwd() string {
-	if m != nil {
-		return m.Cwd
-	}
-	return ""
-}
-
-func (m *AddProcessRequest) GetPid() string {
-	if m != nil {
-		return m.Pid
-	}
-	return ""
-}
-
-func (m *AddProcessRequest) GetStdin() string {
-	if m != nil {
-		return m.Stdin
-	}
-	return ""
-}
-
-func (m *AddProcessRequest) GetStdout() string {
-	if m != nil {
-		return m.Stdout
-	}
-	return ""
-}
-
-func (m *AddProcessRequest) GetStderr() string {
-	if m != nil {
-		return m.Stderr
-	}
-	return ""
-}
-
-func (m *AddProcessRequest) GetCapabilities() []string {
-	if m != nil {
-		return m.Capabilities
-	}
-	return nil
-}
-
-func (m *AddProcessRequest) GetApparmorProfile() string {
-	if m != nil {
-		return m.ApparmorProfile
-	}
-	return ""
-}
-
-func (m *AddProcessRequest) GetSelinuxLabel() string {
-	if m != nil {
-		return m.SelinuxLabel
-	}
-	return ""
-}
-
-func (m *AddProcessRequest) GetNoNewPrivileges() bool {
-	if m != nil {
-		return m.NoNewPrivileges
-	}
-	return false
-}
-
-func (m *AddProcessRequest) GetRlimits() []*Rlimit {
-	if m != nil {
-		return m.Rlimits
-	}
-	return nil
-}
-
-type Rlimit struct {
-	Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Soft uint64 `protobuf:"varint,2,opt,name=soft" json:"soft,omitempty"`
-	Hard uint64 `protobuf:"varint,3,opt,name=hard" json:"hard,omitempty"`
-}
-
-func (m *Rlimit) Reset()                    { *m = Rlimit{} }
-func (m *Rlimit) String() string            { return proto.CompactTextString(m) }
-func (*Rlimit) ProtoMessage()               {}
-func (*Rlimit) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
-
-func (m *Rlimit) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *Rlimit) GetSoft() uint64 {
-	if m != nil {
-		return m.Soft
-	}
-	return 0
-}
-
-func (m *Rlimit) GetHard() uint64 {
-	if m != nil {
-		return m.Hard
-	}
-	return 0
-}
-
-type User struct {
-	Uid            uint32   `protobuf:"varint,1,opt,name=uid" json:"uid,omitempty"`
-	Gid            uint32   `protobuf:"varint,2,opt,name=gid" json:"gid,omitempty"`
-	AdditionalGids []uint32 `protobuf:"varint,3,rep,packed,name=additionalGids" json:"additionalGids,omitempty"`
-}
-
-func (m *User) Reset()                    { *m = User{} }
-func (m *User) String() string            { return proto.CompactTextString(m) }
-func (*User) ProtoMessage()               {}
-func (*User) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
-
-func (m *User) GetUid() uint32 {
-	if m != nil {
-		return m.Uid
-	}
-	return 0
-}
-
-func (m *User) GetGid() uint32 {
-	if m != nil {
-		return m.Gid
-	}
-	return 0
-}
-
-func (m *User) GetAdditionalGids() []uint32 {
-	if m != nil {
-		return m.AdditionalGids
-	}
-	return nil
-}
-
-type AddProcessResponse struct {
-	SystemPid uint32 `protobuf:"varint,1,opt,name=systemPid" json:"systemPid,omitempty"`
-}
-
-func (m *AddProcessResponse) Reset()                    { *m = AddProcessResponse{} }
-func (m *AddProcessResponse) String() string            { return proto.CompactTextString(m) }
-func (*AddProcessResponse) ProtoMessage()               {}
-func (*AddProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
-
-func (m *AddProcessResponse) GetSystemPid() uint32 {
-	if m != nil {
-		return m.SystemPid
-	}
-	return 0
-}
-
-type CreateCheckpointRequest struct {
-	Id            string      `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Checkpoint    *Checkpoint `protobuf:"bytes,2,opt,name=checkpoint" json:"checkpoint,omitempty"`
-	CheckpointDir string      `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"`
-}
-
-func (m *CreateCheckpointRequest) Reset()                    { *m = CreateCheckpointRequest{} }
-func (m *CreateCheckpointRequest) String() string            { return proto.CompactTextString(m) }
-func (*CreateCheckpointRequest) ProtoMessage()               {}
-func (*CreateCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
-
-func (m *CreateCheckpointRequest) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *CreateCheckpointRequest) GetCheckpoint() *Checkpoint {
-	if m != nil {
-		return m.Checkpoint
-	}
-	return nil
-}
-
-func (m *CreateCheckpointRequest) GetCheckpointDir() string {
-	if m != nil {
-		return m.CheckpointDir
-	}
-	return ""
-}
-
-type CreateCheckpointResponse struct {
-}
-
-func (m *CreateCheckpointResponse) Reset()                    { *m = CreateCheckpointResponse{} }
-func (m *CreateCheckpointResponse) String() string            { return proto.CompactTextString(m) }
-func (*CreateCheckpointResponse) ProtoMessage()               {}
-func (*CreateCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
-
-type DeleteCheckpointRequest struct {
-	Id            string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Name          string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
-	CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"`
-}
-
-func (m *DeleteCheckpointRequest) Reset()                    { *m = DeleteCheckpointRequest{} }
-func (m *DeleteCheckpointRequest) String() string            { return proto.CompactTextString(m) }
-func (*DeleteCheckpointRequest) ProtoMessage()               {}
-func (*DeleteCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
-
-func (m *DeleteCheckpointRequest) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *DeleteCheckpointRequest) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *DeleteCheckpointRequest) GetCheckpointDir() string {
-	if m != nil {
-		return m.CheckpointDir
-	}
-	return ""
-}
-
-type DeleteCheckpointResponse struct {
-}
-
-func (m *DeleteCheckpointResponse) Reset()                    { *m = DeleteCheckpointResponse{} }
-func (m *DeleteCheckpointResponse) String() string            { return proto.CompactTextString(m) }
-func (*DeleteCheckpointResponse) ProtoMessage()               {}
-func (*DeleteCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
-
-type ListCheckpointRequest struct {
-	Id            string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	CheckpointDir string `protobuf:"bytes,2,opt,name=checkpointDir" json:"checkpointDir,omitempty"`
-}
-
-func (m *ListCheckpointRequest) Reset()                    { *m = ListCheckpointRequest{} }
-func (m *ListCheckpointRequest) String() string            { return proto.CompactTextString(m) }
-func (*ListCheckpointRequest) ProtoMessage()               {}
-func (*ListCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
-
-func (m *ListCheckpointRequest) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *ListCheckpointRequest) GetCheckpointDir() string {
-	if m != nil {
-		return m.CheckpointDir
-	}
-	return ""
-}
-
-type Checkpoint struct {
-	Name        string   `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	Exit        bool     `protobuf:"varint,2,opt,name=exit" json:"exit,omitempty"`
-	Tcp         bool     `protobuf:"varint,3,opt,name=tcp" json:"tcp,omitempty"`
-	UnixSockets bool     `protobuf:"varint,4,opt,name=unixSockets" json:"unixSockets,omitempty"`
-	Shell       bool     `protobuf:"varint,5,opt,name=shell" json:"shell,omitempty"`
-	EmptyNS     []string `protobuf:"bytes,6,rep,name=emptyNS" json:"emptyNS,omitempty"`
-}
-
-func (m *Checkpoint) Reset()                    { *m = Checkpoint{} }
-func (m *Checkpoint) String() string            { return proto.CompactTextString(m) }
-func (*Checkpoint) ProtoMessage()               {}
-func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
-
-func (m *Checkpoint) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *Checkpoint) GetExit() bool {
-	if m != nil {
-		return m.Exit
-	}
-	return false
-}
-
-func (m *Checkpoint) GetTcp() bool {
-	if m != nil {
-		return m.Tcp
-	}
-	return false
-}
-
-func (m *Checkpoint) GetUnixSockets() bool {
-	if m != nil {
-		return m.UnixSockets
-	}
-	return false
-}
-
-func (m *Checkpoint) GetShell() bool {
-	if m != nil {
-		return m.Shell
-	}
-	return false
-}
-
-func (m *Checkpoint) GetEmptyNS() []string {
-	if m != nil {
-		return m.EmptyNS
-	}
-	return nil
-}
-
-type ListCheckpointResponse struct {
-	Checkpoints []*Checkpoint `protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"`
-}
-
-func (m *ListCheckpointResponse) Reset()                    { *m = ListCheckpointResponse{} }
-func (m *ListCheckpointResponse) String() string            { return proto.CompactTextString(m) }
-func (*ListCheckpointResponse) ProtoMessage()               {}
-func (*ListCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
-
-func (m *ListCheckpointResponse) GetCheckpoints() []*Checkpoint {
-	if m != nil {
-		return m.Checkpoints
-	}
-	return nil
-}
-
-type StateRequest struct {
-	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-}
-
-func (m *StateRequest) Reset()                    { *m = StateRequest{} }
-func (m *StateRequest) String() string            { return proto.CompactTextString(m) }
-func (*StateRequest) ProtoMessage()               {}
-func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
-
-func (m *StateRequest) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-type ContainerState struct {
-	Status string `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"`
-}
-
-func (m *ContainerState) Reset()                    { *m = ContainerState{} }
-func (m *ContainerState) String() string            { return proto.CompactTextString(m) }
-func (*ContainerState) ProtoMessage()               {}
-func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
-
-func (m *ContainerState) GetStatus() string {
-	if m != nil {
-		return m.Status
-	}
-	return ""
-}
-
-type Process struct {
-	Pid             string    `protobuf:"bytes,1,opt,name=pid" json:"pid,omitempty"`
-	Terminal        bool      `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"`
-	User            *User     `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"`
-	Args            []string  `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"`
-	Env             []string  `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"`
-	Cwd             string    `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"`
-	SystemPid       uint32    `protobuf:"varint,7,opt,name=systemPid" json:"systemPid,omitempty"`
-	Stdin           string    `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"`
-	Stdout          string    `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"`
-	Stderr          string    `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"`
-	Capabilities    []string  `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"`
-	ApparmorProfile string    `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"`
-	SelinuxLabel    string    `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"`
-	NoNewPrivileges bool      `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"`
-	Rlimits         []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"`
-}
-
-func (m *Process) Reset()                    { *m = Process{} }
-func (m *Process) String() string            { return proto.CompactTextString(m) }
-func (*Process) ProtoMessage()               {}
-func (*Process) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} }
-
-func (m *Process) GetPid() string {
-	if m != nil {
-		return m.Pid
-	}
-	return ""
-}
-
-func (m *Process) GetTerminal() bool {
-	if m != nil {
-		return m.Terminal
-	}
-	return false
-}
-
-func (m *Process) GetUser() *User {
-	if m != nil {
-		return m.User
-	}
-	return nil
-}
-
-func (m *Process) GetArgs() []string {
-	if m != nil {
-		return m.Args
-	}
-	return nil
-}
-
-func (m *Process) GetEnv() []string {
-	if m != nil {
-		return m.Env
-	}
-	return nil
-}
-
-func (m *Process) GetCwd() string {
-	if m != nil {
-		return m.Cwd
-	}
-	return ""
-}
-
-func (m *Process) GetSystemPid() uint32 {
-	if m != nil {
-		return m.SystemPid
-	}
-	return 0
-}
-
-func (m *Process) GetStdin() string {
-	if m != nil {
-		return m.Stdin
-	}
-	return ""
-}
-
-func (m *Process) GetStdout() string {
-	if m != nil {
-		return m.Stdout
-	}
-	return ""
-}
-
-func (m *Process) GetStderr() string {
-	if m != nil {
-		return m.Stderr
-	}
-	return ""
-}
-
-func (m *Process) GetCapabilities() []string {
-	if m != nil {
-		return m.Capabilities
-	}
-	return nil
-}
-
-func (m *Process) GetApparmorProfile() string {
-	if m != nil {
-		return m.ApparmorProfile
-	}
-	return ""
-}
-
-func (m *Process) GetSelinuxLabel() string {
-	if m != nil {
-		return m.SelinuxLabel
-	}
-	return ""
-}
-
-func (m *Process) GetNoNewPrivileges() bool {
-	if m != nil {
-		return m.NoNewPrivileges
-	}
-	return false
-}
-
-func (m *Process) GetRlimits() []*Rlimit {
-	if m != nil {
-		return m.Rlimits
-	}
-	return nil
-}
-
-type Container struct {
-	Id         string     `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	BundlePath string     `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"`
-	Processes  []*Process `protobuf:"bytes,3,rep,name=processes" json:"processes,omitempty"`
-	Status     string     `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
-	Labels     []string   `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"`
-	Pids       []uint32   `protobuf:"varint,6,rep,packed,name=pids" json:"pids,omitempty"`
-	Runtime    string     `protobuf:"bytes,7,opt,name=runtime" json:"runtime,omitempty"`
-}
-
-func (m *Container) Reset()                    { *m = Container{} }
-func (m *Container) String() string            { return proto.CompactTextString(m) }
-func (*Container) ProtoMessage()               {}
-func (*Container) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} }
-
-func (m *Container) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *Container) GetBundlePath() string {
-	if m != nil {
-		return m.BundlePath
-	}
-	return ""
-}
-
-func (m *Container) GetProcesses() []*Process {
-	if m != nil {
-		return m.Processes
-	}
-	return nil
-}
-
-func (m *Container) GetStatus() string {
-	if m != nil {
-		return m.Status
-	}
-	return ""
-}
-
-func (m *Container) GetLabels() []string {
-	if m != nil {
-		return m.Labels
-	}
-	return nil
-}
-
-func (m *Container) GetPids() []uint32 {
-	if m != nil {
-		return m.Pids
-	}
-	return nil
-}
-
-func (m *Container) GetRuntime() string {
-	if m != nil {
-		return m.Runtime
-	}
-	return ""
-}
-
-// Machine is information about machine on which containerd is run
-type Machine struct {
-	Cpus   uint32 `protobuf:"varint,1,opt,name=cpus" json:"cpus,omitempty"`
-	Memory uint64 `protobuf:"varint,2,opt,name=memory" json:"memory,omitempty"`
-}
-
-func (m *Machine) Reset()                    { *m = Machine{} }
-func (m *Machine) String() string            { return proto.CompactTextString(m) }
-func (*Machine) ProtoMessage()               {}
-func (*Machine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} }
-
-func (m *Machine) GetCpus() uint32 {
-	if m != nil {
-		return m.Cpus
-	}
-	return 0
-}
-
-func (m *Machine) GetMemory() uint64 {
-	if m != nil {
-		return m.Memory
-	}
-	return 0
-}
-
-// StateResponse is information about containerd daemon
-type StateResponse struct {
-	Containers []*Container `protobuf:"bytes,1,rep,name=containers" json:"containers,omitempty"`
-	Machine    *Machine     `protobuf:"bytes,2,opt,name=machine" json:"machine,omitempty"`
-}
-
-func (m *StateResponse) Reset()                    { *m = StateResponse{} }
-func (m *StateResponse) String() string            { return proto.CompactTextString(m) }
-func (*StateResponse) ProtoMessage()               {}
-func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} }
-
-func (m *StateResponse) GetContainers() []*Container {
-	if m != nil {
-		return m.Containers
-	}
-	return nil
-}
-
-func (m *StateResponse) GetMachine() *Machine {
-	if m != nil {
-		return m.Machine
-	}
-	return nil
-}
-
-type UpdateContainerRequest struct {
-	Id        string          `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-	Pid       string          `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"`
-	Status    string          `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"`
-	Resources *UpdateResource `protobuf:"bytes,4,opt,name=resources" json:"resources,omitempty"`
-}
-
-func (m *UpdateContainerRequest) Reset()                    { *m = UpdateContainerRequest{} }
-func (m *UpdateContainerRequest) String() string            { return proto.CompactTextString(m) }
-func (*UpdateContainerRequest) ProtoMessage()               {}
-func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} }
-
-func (m *UpdateContainerRequest) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *UpdateContainerRequest) GetPid() string {
-	if m != nil {
-		return m.Pid
-	}
-	return ""
-}
-
-func (m *UpdateContainerRequest) GetStatus() string {
-	if m != nil {
-		return m.Status
-	}
-	return ""
-}
-
-func (m *UpdateContainerRequest) GetResources() *UpdateResource {
-	if m != nil {
-		return m.Resources
-	}
-	return nil
-}
-
-type UpdateResource struct {
-	BlkioWeight                  uint64            `protobuf:"varint,1,opt,name=blkioWeight" json:"blkioWeight,omitempty"`
-	CpuShares                    uint64            `protobuf:"varint,2,opt,name=cpuShares" json:"cpuShares,omitempty"`
-	CpuPeriod                    uint64            `protobuf:"varint,3,opt,name=cpuPeriod" json:"cpuPeriod,omitempty"`
-	CpuQuota                     uint64            `protobuf:"varint,4,opt,name=cpuQuota" json:"cpuQuota,omitempty"`
-	CpusetCpus                   string            `protobuf:"bytes,5,opt,name=cpusetCpus" json:"cpusetCpus,omitempty"`
-	CpusetMems                   string            `protobuf:"bytes,6,opt,name=cpusetMems" json:"cpusetMems,omitempty"`
-	MemoryLimit                  uint64            `protobuf:"varint,7,opt,name=memoryLimit" json:"memoryLimit,omitempty"`
-	MemorySwap                   uint64            `protobuf:"varint,8,opt,name=memorySwap" json:"memorySwap,omitempty"`
-	MemoryReservation            uint64            `protobuf:"varint,9,opt,name=memoryReservation" json:"memoryReservation,omitempty"`
-	KernelMemoryLimit            uint64            `protobuf:"varint,10,opt,name=kernelMemoryLimit" json:"kernelMemoryLimit,omitempty"`
-	KernelTCPMemoryLimit         uint64            `protobuf:"varint,11,opt,name=kernelTCPMemoryLimit" json:"kernelTCPMemoryLimit,omitempty"`
-	BlkioLeafWeight              uint64            `protobuf:"varint,12,opt,name=blkioLeafWeight" json:"blkioLeafWeight,omitempty"`
-	BlkioWeightDevice            []*WeightDevice   `protobuf:"bytes,13,rep,name=blkioWeightDevice" json:"blkioWeightDevice,omitempty"`
-	BlkioThrottleReadBpsDevice   []*ThrottleDevice `protobuf:"bytes,14,rep,name=blkioThrottleReadBpsDevice" json:"blkioThrottleReadBpsDevice,omitempty"`
-	BlkioThrottleWriteBpsDevice  []*ThrottleDevice `protobuf:"bytes,15,rep,name=blkioThrottleWriteBpsDevice" json:"blkioThrottleWriteBpsDevice,omitempty"`
-	BlkioThrottleReadIopsDevice  []*ThrottleDevice `protobuf:"bytes,16,rep,name=blkioThrottleReadIopsDevice" json:"blkioThrottleReadIopsDevice,omitempty"`
-	BlkioThrottleWriteIopsDevice []*ThrottleDevice `protobuf:"bytes,17,rep,name=blkioThrottleWriteIopsDevice" json:"blkioThrottleWriteIopsDevice,omitempty"`
-	PidsLimit                    uint64            `protobuf:"varint,18,opt,name=pidsLimit" json:"pidsLimit,omitempty"`
-	CpuRealtimePeriod            uint64            `protobuf:"varint,19,opt,name=cpuRealtimePeriod" json:"cpuRealtimePeriod,omitempty"`
-	CpuRealtimeRuntime           int64             `protobuf:"varint,20,opt,name=cpuRealtimeRuntime" json:"cpuRealtimeRuntime,omitempty"`
-}
-
-func (m *UpdateResource) Reset()                    { *m = UpdateResource{} }
-func (m *UpdateResource) String() string            { return proto.CompactTextString(m) }
-func (*UpdateResource) ProtoMessage()               {}
-func (*UpdateResource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} }
-
-func (m *UpdateResource) GetBlkioWeight() uint64 {
-	if m != nil {
-		return m.BlkioWeight
-	}
-	return 0
-}
-
-func (m *UpdateResource) GetCpuShares() uint64 {
-	if m != nil {
-		return m.CpuShares
-	}
-	return 0
-}
-
-func (m *UpdateResource) GetCpuPeriod() uint64 {
-	if m != nil {
-		return m.CpuPeriod
-	}
-	return 0
-}
-
-func (m *UpdateResource) GetCpuQuota() uint64 {
-	if m != nil {
-		return m.CpuQuota
-	}
-	return 0
-}
-
-func (m *UpdateResource) GetCpusetCpus() string {
-	if m != nil {
-		return m.CpusetCpus
-	}
-	return ""
-}
-
-func (m *UpdateResource) GetCpusetMems() string {
-	if m != nil {
-		return m.CpusetMems
-	}
-	return ""
-}
-
-func (m *UpdateResource) GetMemoryLimit() uint64 {
-	if m != nil {
-		return m.MemoryLimit
-	}
-	return 0
-}
-
-func (m *UpdateResource) GetMemorySwap() uint64 {
-	if m != nil {
-		return m.MemorySwap
-	}
-	return 0
-}
-
-func (m *UpdateResource) GetMemoryReservation() uint64 {
-	if m != nil {
-		return m.MemoryReservation
-	}
-	return 0
-}
-
-func (m *UpdateResource) GetKernelMemoryLimit() uint64 {
-	if m != nil {
-		return m.KernelMemoryLimit
-	}
-	return 0
-}
-
-func (m *UpdateResource) GetKernelTCPMemoryLimit() uint64 {
-	if m != nil {
-		return m.KernelTCPMemoryLimit
-	}
-	return 0
-}
-
-func (m *UpdateResource) GetBlkioLeafWeight() uint64 {
-	if m != nil {
-		return m.BlkioLeafWeight
-	}
-	return 0
-}
-
-func (m *UpdateResource) GetBlkioWeightDevice() []*WeightDevice {
-	if m != nil {
-		return m.BlkioWeightDevice
-	}
-	return nil
-}
-
-func (m *UpdateResource) GetBlkioThrottleReadBpsDevice() []*ThrottleDevice {
-	if m != nil {
-		return m.BlkioThrottleReadBpsDevice
-	}
-	return nil
-}
-
-func (m *UpdateResource) GetBlkioThrottleWriteBpsDevice() []*ThrottleDevice {
-	if m != nil {
-		return m.BlkioThrottleWriteBpsDevice
-	}
-	return nil
-}
-
-func (m *UpdateResource) GetBlkioThrottleReadIopsDevice() []*ThrottleDevice {
-	if m != nil {
-		return m.BlkioThrottleReadIopsDevice
-	}
-	return nil
-}
-
-func (m *UpdateResource) GetBlkioThrottleWriteIopsDevice() []*ThrottleDevice {
-	if m != nil {
-		return m.BlkioThrottleWriteIopsDevice
-	}
-	return nil
-}
-
-func (m *UpdateResource) GetPidsLimit() uint64 {
-	if m != nil {
-		return m.PidsLimit
-	}
-	return 0
-}
-
-func (m *UpdateResource) GetCpuRealtimePeriod() uint64 {
-	if m != nil {
-		return m.CpuRealtimePeriod
-	}
-	return 0
-}
-
-func (m *UpdateResource) GetCpuRealtimeRuntime() int64 {
-	if m != nil {
-		return m.CpuRealtimeRuntime
-	}
-	return 0
-}
-
-type BlockIODevice struct {
-	Major int64 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
-	Minor int64 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
-}
-
-func (m *BlockIODevice) Reset()                    { *m = BlockIODevice{} }
-func (m *BlockIODevice) String() string            { return proto.CompactTextString(m) }
-func (*BlockIODevice) ProtoMessage()               {}
-func (*BlockIODevice) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} }
-
-func (m *BlockIODevice) GetMajor() int64 {
-	if m != nil {
-		return m.Major
-	}
-	return 0
-}
-
-func (m *BlockIODevice) GetMinor() int64 {
-	if m != nil {
-		return m.Minor
-	}
-	return 0
-}
-
-type WeightDevice struct {
-	BlkIODevice *BlockIODevice `protobuf:"bytes,1,opt,name=blkIODevice" json:"blkIODevice,omitempty"`
-	Weight      uint32         `protobuf:"varint,2,opt,name=weight" json:"weight,omitempty"`
-	LeafWeight  uint32         `protobuf:"varint,3,opt,name=leafWeight" json:"leafWeight,omitempty"`
-}
-
-func (m *WeightDevice) Reset()                    { *m = WeightDevice{} }
-func (m *WeightDevice) String() string            { return proto.CompactTextString(m) }
-func (*WeightDevice) ProtoMessage()               {}
-func (*WeightDevice) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} }
-
-func (m *WeightDevice) GetBlkIODevice() *BlockIODevice {
-	if m != nil {
-		return m.BlkIODevice
-	}
-	return nil
-}
-
-func (m *WeightDevice) GetWeight() uint32 {
-	if m != nil {
-		return m.Weight
-	}
-	return 0
-}
-
-func (m *WeightDevice) GetLeafWeight() uint32 {
-	if m != nil {
-		return m.LeafWeight
-	}
-	return 0
-}
-
-type ThrottleDevice struct {
-	BlkIODevice *BlockIODevice `protobuf:"bytes,1,opt,name=blkIODevice" json:"blkIODevice,omitempty"`
-	Rate        uint64         `protobuf:"varint,2,opt,name=rate" json:"rate,omitempty"`
-}
-
-func (m *ThrottleDevice) Reset()                    { *m = ThrottleDevice{} }
-func (m *ThrottleDevice) String() string            { return proto.CompactTextString(m) }
-func (*ThrottleDevice) ProtoMessage()               {}
-func (*ThrottleDevice) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} }
-
-func (m *ThrottleDevice) GetBlkIODevice() *BlockIODevice {
-	if m != nil {
-		return m.BlkIODevice
-	}
-	return nil
-}
-
-func (m *ThrottleDevice) GetRate() uint64 {
-	if m != nil {
-		return m.Rate
-	}
-	return 0
-}
-
-type UpdateContainerResponse struct {
-}
-
-func (m *UpdateContainerResponse) Reset()                    { *m = UpdateContainerResponse{} }
-func (m *UpdateContainerResponse) String() string            { return proto.CompactTextString(m) }
-func (*UpdateContainerResponse) ProtoMessage()               {}
-func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} }
-
-type EventsRequest struct {
-	// Tag 1 is deprecated (old uint64 timestamp)
-	Timestamp  *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp,omitempty"`
-	StoredOnly bool                       `protobuf:"varint,3,opt,name=storedOnly" json:"storedOnly,omitempty"`
-	Id         string                     `protobuf:"bytes,4,opt,name=id" json:"id,omitempty"`
-}
-
-func (m *EventsRequest) Reset()                    { *m = EventsRequest{} }
-func (m *EventsRequest) String() string            { return proto.CompactTextString(m) }
-func (*EventsRequest) ProtoMessage()               {}
-func (*EventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} }
-
-func (m *EventsRequest) GetTimestamp() *google_protobuf.Timestamp {
-	if m != nil {
-		return m.Timestamp
-	}
-	return nil
-}
-
-func (m *EventsRequest) GetStoredOnly() bool {
-	if m != nil {
-		return m.StoredOnly
-	}
-	return false
-}
-
-func (m *EventsRequest) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-type Event struct {
-	Type   string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"`
-	Id     string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"`
-	Status uint32 `protobuf:"varint,3,opt,name=status" json:"status,omitempty"`
-	Pid    string `protobuf:"bytes,4,opt,name=pid" json:"pid,omitempty"`
-	// Tag 5 is deprecated (old uint64 timestamp)
-	Timestamp *google_protobuf.Timestamp `protobuf:"bytes,6,opt,name=timestamp" json:"timestamp,omitempty"`
-}
-
-func (m *Event) Reset()                    { *m = Event{} }
-func (m *Event) String() string            { return proto.CompactTextString(m) }
-func (*Event) ProtoMessage()               {}
-func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} }
-
-func (m *Event) GetType() string {
-	if m != nil {
-		return m.Type
-	}
-	return ""
-}
-
-func (m *Event) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func (m *Event) GetStatus() uint32 {
-	if m != nil {
-		return m.Status
-	}
-	return 0
-}
-
-func (m *Event) GetPid() string {
-	if m != nil {
-		return m.Pid
-	}
-	return ""
-}
-
-func (m *Event) GetTimestamp() *google_protobuf.Timestamp {
-	if m != nil {
-		return m.Timestamp
-	}
-	return nil
-}
-
-type NetworkStats struct {
-	Name       string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
-	RxBytes    uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes" json:"rx_bytes,omitempty"`
-	Rx_Packets uint64 `protobuf:"varint,3,opt,name=rx_Packets,json=rxPackets" json:"rx_Packets,omitempty"`
-	RxErrors   uint64 `protobuf:"varint,4,opt,name=Rx_errors,json=RxErrors" json:"Rx_errors,omitempty"`
-	RxDropped  uint64 `protobuf:"varint,5,opt,name=Rx_dropped,json=RxDropped" json:"Rx_dropped,omitempty"`
-	TxBytes    uint64 `protobuf:"varint,6,opt,name=Tx_bytes,json=TxBytes" json:"Tx_bytes,omitempty"`
-	TxPackets  uint64 `protobuf:"varint,7,opt,name=Tx_packets,json=TxPackets" json:"Tx_packets,omitempty"`
-	TxErrors   uint64 `protobuf:"varint,8,opt,name=Tx_errors,json=TxErrors" json:"Tx_errors,omitempty"`
-	TxDropped  uint64 `protobuf:"varint,9,opt,name=Tx_dropped,json=TxDropped" json:"Tx_dropped,omitempty"`
-}
-
-func (m *NetworkStats) Reset()                    { *m = NetworkStats{} }
-func (m *NetworkStats) String() string            { return proto.CompactTextString(m) }
-func (*NetworkStats) ProtoMessage()               {}
-func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} }
-
-func (m *NetworkStats) GetName() string {
-	if m != nil {
-		return m.Name
-	}
-	return ""
-}
-
-func (m *NetworkStats) GetRxBytes() uint64 {
-	if m != nil {
-		return m.RxBytes
-	}
-	return 0
-}
-
-func (m *NetworkStats) GetRx_Packets() uint64 {
-	if m != nil {
-		return m.Rx_Packets
-	}
-	return 0
-}
-
-func (m *NetworkStats) GetRxErrors() uint64 {
-	if m != nil {
-		return m.RxErrors
-	}
-	return 0
-}
-
-func (m *NetworkStats) GetRxDropped() uint64 {
-	if m != nil {
-		return m.RxDropped
-	}
-	return 0
-}
-
-func (m *NetworkStats) GetTxBytes() uint64 {
-	if m != nil {
-		return m.TxBytes
-	}
-	return 0
-}
-
-func (m *NetworkStats) GetTxPackets() uint64 {
-	if m != nil {
-		return m.TxPackets
-	}
-	return 0
-}
-
-func (m *NetworkStats) GetTxErrors() uint64 {
-	if m != nil {
-		return m.TxErrors
-	}
-	return 0
-}
-
-func (m *NetworkStats) GetTxDropped() uint64 {
-	if m != nil {
-		return m.TxDropped
-	}
-	return 0
-}
-
-type CpuUsage struct {
-	TotalUsage        uint64   `protobuf:"varint,1,opt,name=total_usage,json=totalUsage" json:"total_usage,omitempty"`
-	PercpuUsage       []uint64 `protobuf:"varint,2,rep,packed,name=percpu_usage,json=percpuUsage" json:"percpu_usage,omitempty"`
-	UsageInKernelmode uint64   `protobuf:"varint,3,opt,name=usage_in_kernelmode,json=usageInKernelmode" json:"usage_in_kernelmode,omitempty"`
-	UsageInUsermode   uint64   `protobuf:"varint,4,opt,name=usage_in_usermode,json=usageInUsermode" json:"usage_in_usermode,omitempty"`
-}
-
-func (m *CpuUsage) Reset()                    { *m = CpuUsage{} }
-func (m *CpuUsage) String() string            { return proto.CompactTextString(m) }
-func (*CpuUsage) ProtoMessage()               {}
-func (*CpuUsage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} }
-
-func (m *CpuUsage) GetTotalUsage() uint64 {
-	if m != nil {
-		return m.TotalUsage
-	}
-	return 0
-}
-
-func (m *CpuUsage) GetPercpuUsage() []uint64 {
-	if m != nil {
-		return m.PercpuUsage
-	}
-	return nil
-}
-
-func (m *CpuUsage) GetUsageInKernelmode() uint64 {
-	if m != nil {
-		return m.UsageInKernelmode
-	}
-	return 0
-}
-
-func (m *CpuUsage) GetUsageInUsermode() uint64 {
-	if m != nil {
-		return m.UsageInUsermode
-	}
-	return 0
-}
-
-type ThrottlingData struct {
-	Periods          uint64 `protobuf:"varint,1,opt,name=periods" json:"periods,omitempty"`
-	ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods" json:"throttled_periods,omitempty"`
-	ThrottledTime    uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"`
-}
-
-func (m *ThrottlingData) Reset()                    { *m = ThrottlingData{} }
-func (m *ThrottlingData) String() string            { return proto.CompactTextString(m) }
-func (*ThrottlingData) ProtoMessage()               {}
-func (*ThrottlingData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} }
-
-func (m *ThrottlingData) GetPeriods() uint64 {
-	if m != nil {
-		return m.Periods
-	}
-	return 0
-}
-
-func (m *ThrottlingData) GetThrottledPeriods() uint64 {
-	if m != nil {
-		return m.ThrottledPeriods
-	}
-	return 0
-}
-
-func (m *ThrottlingData) GetThrottledTime() uint64 {
-	if m != nil {
-		return m.ThrottledTime
-	}
-	return 0
-}
-
-type CpuStats struct {
-	CpuUsage       *CpuUsage       `protobuf:"bytes,1,opt,name=cpu_usage,json=cpuUsage" json:"cpu_usage,omitempty"`
-	ThrottlingData *ThrottlingData `protobuf:"bytes,2,opt,name=throttling_data,json=throttlingData" json:"throttling_data,omitempty"`
-	SystemUsage    uint64          `protobuf:"varint,3,opt,name=system_usage,json=systemUsage" json:"system_usage,omitempty"`
-}
-
-func (m *CpuStats) Reset()                    { *m = CpuStats{} }
-func (m *CpuStats) String() string            { return proto.CompactTextString(m) }
-func (*CpuStats) ProtoMessage()               {}
-func (*CpuStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} }
-
-func (m *CpuStats) GetCpuUsage() *CpuUsage {
-	if m != nil {
-		return m.CpuUsage
-	}
-	return nil
-}
-
-func (m *CpuStats) GetThrottlingData() *ThrottlingData {
-	if m != nil {
-		return m.ThrottlingData
-	}
-	return nil
-}
-
-func (m *CpuStats) GetSystemUsage() uint64 {
-	if m != nil {
-		return m.SystemUsage
-	}
-	return 0
-}
-
-type PidsStats struct {
-	Current uint64 `protobuf:"varint,1,opt,name=current" json:"current,omitempty"`
-	Limit   uint64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"`
-}
-
-func (m *PidsStats) Reset()                    { *m = PidsStats{} }
-func (m *PidsStats) String() string            { return proto.CompactTextString(m) }
-func (*PidsStats) ProtoMessage()               {}
-func (*PidsStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} }
-
-func (m *PidsStats) GetCurrent() uint64 {
-	if m != nil {
-		return m.Current
-	}
-	return 0
-}
-
-func (m *PidsStats) GetLimit() uint64 {
-	if m != nil {
-		return m.Limit
-	}
-	return 0
-}
-
-type MemoryData struct {
-	Usage    uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"`
-	MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"`
-	Failcnt  uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"`
-	Limit    uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"`
-}
-
-func (m *MemoryData) Reset()                    { *m = MemoryData{} }
-func (m *MemoryData) String() string            { return proto.CompactTextString(m) }
-func (*MemoryData) ProtoMessage()               {}
-func (*MemoryData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} }
-
-func (m *MemoryData) GetUsage() uint64 {
-	if m != nil {
-		return m.Usage
-	}
-	return 0
-}
-
-func (m *MemoryData) GetMaxUsage() uint64 {
-	if m != nil {
-		return m.MaxUsage
-	}
-	return 0
-}
-
-func (m *MemoryData) GetFailcnt() uint64 {
-	if m != nil {
-		return m.Failcnt
-	}
-	return 0
-}
-
-func (m *MemoryData) GetLimit() uint64 {
-	if m != nil {
-		return m.Limit
-	}
-	return 0
-}
-
-type MemoryStats struct {
-	Cache       uint64            `protobuf:"varint,1,opt,name=cache" json:"cache,omitempty"`
-	Usage       *MemoryData       `protobuf:"bytes,2,opt,name=usage" json:"usage,omitempty"`
-	SwapUsage   *MemoryData       `protobuf:"bytes,3,opt,name=swap_usage,json=swapUsage" json:"swap_usage,omitempty"`
-	KernelUsage *MemoryData       `protobuf:"bytes,4,opt,name=kernel_usage,json=kernelUsage" json:"kernel_usage,omitempty"`
-	Stats       map[string]uint64 `protobuf:"bytes,5,rep,name=stats" json:"stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"`
-}
-
-func (m *MemoryStats) Reset()                    { *m = MemoryStats{} }
-func (m *MemoryStats) String() string            { return proto.CompactTextString(m) }
-func (*MemoryStats) ProtoMessage()               {}
-func (*MemoryStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} }
-
-func (m *MemoryStats) GetCache() uint64 {
-	if m != nil {
-		return m.Cache
-	}
-	return 0
-}
-
-func (m *MemoryStats) GetUsage() *MemoryData {
-	if m != nil {
-		return m.Usage
-	}
-	return nil
-}
-
-func (m *MemoryStats) GetSwapUsage() *MemoryData {
-	if m != nil {
-		return m.SwapUsage
-	}
-	return nil
-}
-
-func (m *MemoryStats) GetKernelUsage() *MemoryData {
-	if m != nil {
-		return m.KernelUsage
-	}
-	return nil
-}
-
-func (m *MemoryStats) GetStats() map[string]uint64 {
-	if m != nil {
-		return m.Stats
-	}
-	return nil
-}
-
-type BlkioStatsEntry struct {
-	Major uint64 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
-	Minor uint64 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
-	Op    string `protobuf:"bytes,3,opt,name=op" json:"op,omitempty"`
-	Value uint64 `protobuf:"varint,4,opt,name=value" json:"value,omitempty"`
-}
-
-func (m *BlkioStatsEntry) Reset()                    { *m = BlkioStatsEntry{} }
-func (m *BlkioStatsEntry) String() string            { return proto.CompactTextString(m) }
-func (*BlkioStatsEntry) ProtoMessage()               {}
-func (*BlkioStatsEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} }
-
-func (m *BlkioStatsEntry) GetMajor() uint64 {
-	if m != nil {
-		return m.Major
-	}
-	return 0
-}
-
-func (m *BlkioStatsEntry) GetMinor() uint64 {
-	if m != nil {
-		return m.Minor
-	}
-	return 0
-}
-
-func (m *BlkioStatsEntry) GetOp() string {
-	if m != nil {
-		return m.Op
-	}
-	return ""
-}
-
-func (m *BlkioStatsEntry) GetValue() uint64 {
-	if m != nil {
-		return m.Value
-	}
-	return 0
-}
-
-type BlkioStats struct {
-	IoServiceBytesRecursive []*BlkioStatsEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive" json:"io_service_bytes_recursive,omitempty"`
-	IoServicedRecursive     []*BlkioStatsEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive" json:"io_serviced_recursive,omitempty"`
-	IoQueuedRecursive       []*BlkioStatsEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive" json:"io_queued_recursive,omitempty"`
-	IoServiceTimeRecursive  []*BlkioStatsEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive" json:"io_service_time_recursive,omitempty"`
-	IoWaitTimeRecursive     []*BlkioStatsEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive" json:"io_wait_time_recursive,omitempty"`
-	IoMergedRecursive       []*BlkioStatsEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive" json:"io_merged_recursive,omitempty"`
-	IoTimeRecursive         []*BlkioStatsEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive" json:"io_time_recursive,omitempty"`
-	SectorsRecursive        []*BlkioStatsEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive" json:"sectors_recursive,omitempty"`
-}
-
-func (m *BlkioStats) Reset()                    { *m = BlkioStats{} }
-func (m *BlkioStats) String() string            { return proto.CompactTextString(m) }
-func (*BlkioStats) ProtoMessage()               {}
-func (*BlkioStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} }
-
-func (m *BlkioStats) GetIoServiceBytesRecursive() []*BlkioStatsEntry {
-	if m != nil {
-		return m.IoServiceBytesRecursive
-	}
-	return nil
-}
-
-func (m *BlkioStats) GetIoServicedRecursive() []*BlkioStatsEntry {
-	if m != nil {
-		return m.IoServicedRecursive
-	}
-	return nil
-}
-
-func (m *BlkioStats) GetIoQueuedRecursive() []*BlkioStatsEntry {
-	if m != nil {
-		return m.IoQueuedRecursive
-	}
-	return nil
-}
-
-func (m *BlkioStats) GetIoServiceTimeRecursive() []*BlkioStatsEntry {
-	if m != nil {
-		return m.IoServiceTimeRecursive
-	}
-	return nil
-}
-
-func (m *BlkioStats) GetIoWaitTimeRecursive() []*BlkioStatsEntry {
-	if m != nil {
-		return m.IoWaitTimeRecursive
-	}
-	return nil
-}
-
-func (m *BlkioStats) GetIoMergedRecursive() []*BlkioStatsEntry {
-	if m != nil {
-		return m.IoMergedRecursive
-	}
-	return nil
-}
-
-func (m *BlkioStats) GetIoTimeRecursive() []*BlkioStatsEntry {
-	if m != nil {
-		return m.IoTimeRecursive
-	}
-	return nil
-}
-
-func (m *BlkioStats) GetSectorsRecursive() []*BlkioStatsEntry {
-	if m != nil {
-		return m.SectorsRecursive
-	}
-	return nil
-}
-
-type HugetlbStats struct {
-	Usage    uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"`
-	MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"`
-	Failcnt  uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"`
-	Limit    uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"`
-}
-
-func (m *HugetlbStats) Reset()                    { *m = HugetlbStats{} }
-func (m *HugetlbStats) String() string            { return proto.CompactTextString(m) }
-func (*HugetlbStats) ProtoMessage()               {}
-func (*HugetlbStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} }
-
-func (m *HugetlbStats) GetUsage() uint64 {
-	if m != nil {
-		return m.Usage
-	}
-	return 0
-}
-
-func (m *HugetlbStats) GetMaxUsage() uint64 {
-	if m != nil {
-		return m.MaxUsage
-	}
-	return 0
-}
-
-func (m *HugetlbStats) GetFailcnt() uint64 {
-	if m != nil {
-		return m.Failcnt
-	}
-	return 0
-}
-
-func (m *HugetlbStats) GetLimit() uint64 {
-	if m != nil {
-		return m.Limit
-	}
-	return 0
-}
-
-type CgroupStats struct {
-	CpuStats     *CpuStats                `protobuf:"bytes,1,opt,name=cpu_stats,json=cpuStats" json:"cpu_stats,omitempty"`
-	MemoryStats  *MemoryStats             `protobuf:"bytes,2,opt,name=memory_stats,json=memoryStats" json:"memory_stats,omitempty"`
-	BlkioStats   *BlkioStats              `protobuf:"bytes,3,opt,name=blkio_stats,json=blkioStats" json:"blkio_stats,omitempty"`
-	HugetlbStats map[string]*HugetlbStats `protobuf:"bytes,4,rep,name=hugetlb_stats,json=hugetlbStats" json:"hugetlb_stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-	PidsStats    *PidsStats               `protobuf:"bytes,5,opt,name=pids_stats,json=pidsStats" json:"pids_stats,omitempty"`
-}
-
-func (m *CgroupStats) Reset()                    { *m = CgroupStats{} }
-func (m *CgroupStats) String() string            { return proto.CompactTextString(m) }
-func (*CgroupStats) ProtoMessage()               {}
-func (*CgroupStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} }
-
-func (m *CgroupStats) GetCpuStats() *CpuStats {
-	if m != nil {
-		return m.CpuStats
-	}
-	return nil
-}
-
-func (m *CgroupStats) GetMemoryStats() *MemoryStats {
-	if m != nil {
-		return m.MemoryStats
-	}
-	return nil
-}
-
-func (m *CgroupStats) GetBlkioStats() *BlkioStats {
-	if m != nil {
-		return m.BlkioStats
-	}
-	return nil
-}
-
-func (m *CgroupStats) GetHugetlbStats() map[string]*HugetlbStats {
-	if m != nil {
-		return m.HugetlbStats
-	}
-	return nil
-}
-
-func (m *CgroupStats) GetPidsStats() *PidsStats {
-	if m != nil {
-		return m.PidsStats
-	}
-	return nil
-}
-
-type StatsResponse struct {
-	NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats,json=networkStats" json:"network_stats,omitempty"`
-	CgroupStats  *CgroupStats    `protobuf:"bytes,2,opt,name=cgroup_stats,json=cgroupStats" json:"cgroup_stats,omitempty"`
-	// Tag 3 is deprecated (old uint64 timestamp)
-	Timestamp *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=timestamp" json:"timestamp,omitempty"`
-}
-
-func (m *StatsResponse) Reset()                    { *m = StatsResponse{} }
-func (m *StatsResponse) String() string            { return proto.CompactTextString(m) }
-func (*StatsResponse) ProtoMessage()               {}
-func (*StatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} }
-
-func (m *StatsResponse) GetNetworkStats() []*NetworkStats {
-	if m != nil {
-		return m.NetworkStats
-	}
-	return nil
-}
-
-func (m *StatsResponse) GetCgroupStats() *CgroupStats {
-	if m != nil {
-		return m.CgroupStats
-	}
-	return nil
-}
-
-func (m *StatsResponse) GetTimestamp() *google_protobuf.Timestamp {
-	if m != nil {
-		return m.Timestamp
-	}
-	return nil
-}
-
-type StatsRequest struct {
-	Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
-}
-
-func (m *StatsRequest) Reset()                    { *m = StatsRequest{} }
-func (m *StatsRequest) String() string            { return proto.CompactTextString(m) }
-func (*StatsRequest) ProtoMessage()               {}
-func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} }
-
-func (m *StatsRequest) GetId() string {
-	if m != nil {
-		return m.Id
-	}
-	return ""
-}
-
-func init() {
-	proto.RegisterType((*GetServerVersionRequest)(nil), "types.GetServerVersionRequest")
-	proto.RegisterType((*GetServerVersionResponse)(nil), "types.GetServerVersionResponse")
-	proto.RegisterType((*UpdateProcessRequest)(nil), "types.UpdateProcessRequest")
-	proto.RegisterType((*UpdateProcessResponse)(nil), "types.UpdateProcessResponse")
-	proto.RegisterType((*CreateContainerRequest)(nil), "types.CreateContainerRequest")
-	proto.RegisterType((*CreateContainerResponse)(nil), "types.CreateContainerResponse")
-	proto.RegisterType((*SignalRequest)(nil), "types.SignalRequest")
-	proto.RegisterType((*SignalResponse)(nil), "types.SignalResponse")
-	proto.RegisterType((*AddProcessRequest)(nil), "types.AddProcessRequest")
-	proto.RegisterType((*Rlimit)(nil), "types.Rlimit")
-	proto.RegisterType((*User)(nil), "types.User")
-	proto.RegisterType((*AddProcessResponse)(nil), "types.AddProcessResponse")
-	proto.RegisterType((*CreateCheckpointRequest)(nil), "types.CreateCheckpointRequest")
-	proto.RegisterType((*CreateCheckpointResponse)(nil), "types.CreateCheckpointResponse")
-	proto.RegisterType((*DeleteCheckpointRequest)(nil), "types.DeleteCheckpointRequest")
-	proto.RegisterType((*DeleteCheckpointResponse)(nil), "types.DeleteCheckpointResponse")
-	proto.RegisterType((*ListCheckpointRequest)(nil), "types.ListCheckpointRequest")
-	proto.RegisterType((*Checkpoint)(nil), "types.Checkpoint")
-	proto.RegisterType((*ListCheckpointResponse)(nil), "types.ListCheckpointResponse")
-	proto.RegisterType((*StateRequest)(nil), "types.StateRequest")
-	proto.RegisterType((*ContainerState)(nil), "types.ContainerState")
-	proto.RegisterType((*Process)(nil), "types.Process")
-	proto.RegisterType((*Container)(nil), "types.Container")
-	proto.RegisterType((*Machine)(nil), "types.Machine")
-	proto.RegisterType((*StateResponse)(nil), "types.StateResponse")
-	proto.RegisterType((*UpdateContainerRequest)(nil), "types.UpdateContainerRequest")
-	proto.RegisterType((*UpdateResource)(nil), "types.UpdateResource")
-	proto.RegisterType((*BlockIODevice)(nil), "types.BlockIODevice")
-	proto.RegisterType((*WeightDevice)(nil), "types.WeightDevice")
-	proto.RegisterType((*ThrottleDevice)(nil), "types.ThrottleDevice")
-	proto.RegisterType((*UpdateContainerResponse)(nil), "types.UpdateContainerResponse")
-	proto.RegisterType((*EventsRequest)(nil), "types.EventsRequest")
-	proto.RegisterType((*Event)(nil), "types.Event")
-	proto.RegisterType((*NetworkStats)(nil), "types.NetworkStats")
-	proto.RegisterType((*CpuUsage)(nil), "types.CpuUsage")
-	proto.RegisterType((*ThrottlingData)(nil), "types.ThrottlingData")
-	proto.RegisterType((*CpuStats)(nil), "types.CpuStats")
-	proto.RegisterType((*PidsStats)(nil), "types.PidsStats")
-	proto.RegisterType((*MemoryData)(nil), "types.MemoryData")
-	proto.RegisterType((*MemoryStats)(nil), "types.MemoryStats")
-	proto.RegisterType((*BlkioStatsEntry)(nil), "types.BlkioStatsEntry")
-	proto.RegisterType((*BlkioStats)(nil), "types.BlkioStats")
-	proto.RegisterType((*HugetlbStats)(nil), "types.HugetlbStats")
-	proto.RegisterType((*CgroupStats)(nil), "types.CgroupStats")
-	proto.RegisterType((*StatsResponse)(nil), "types.StatsResponse")
-	proto.RegisterType((*StatsRequest)(nil), "types.StatsRequest")
-}
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ context.Context
-var _ grpc.ClientConn
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-const _ = grpc.SupportPackageIsVersion4
-
-// Client API for API service
-
-type APIClient interface {
-	GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error)
-	CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error)
-	UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error)
-	Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error)
-	UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error)
-	AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error)
-	CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error)
-	DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error)
-	ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error)
-	State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error)
-	Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error)
-	Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error)
-}
-
-type aPIClient struct {
-	cc *grpc.ClientConn
-}
-
-func NewAPIClient(cc *grpc.ClientConn) APIClient {
-	return &aPIClient{cc}
-}
-
-func (c *aPIClient) GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) {
-	out := new(GetServerVersionResponse)
-	err := grpc.Invoke(ctx, "/types.API/GetServerVersion", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *aPIClient) CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) {
-	out := new(CreateContainerResponse)
-	err := grpc.Invoke(ctx, "/types.API/CreateContainer", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *aPIClient) UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) {
-	out := new(UpdateContainerResponse)
-	err := grpc.Invoke(ctx, "/types.API/UpdateContainer", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *aPIClient) Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) {
-	out := new(SignalResponse)
-	err := grpc.Invoke(ctx, "/types.API/Signal", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *aPIClient) UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) {
-	out := new(UpdateProcessResponse)
-	err := grpc.Invoke(ctx, "/types.API/UpdateProcess", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *aPIClient) AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) {
-	out := new(AddProcessResponse)
-	err := grpc.Invoke(ctx, "/types.API/AddProcess", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *aPIClient) CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) {
-	out := new(CreateCheckpointResponse)
-	err := grpc.Invoke(ctx, "/types.API/CreateCheckpoint", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *aPIClient) DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) {
-	out := new(DeleteCheckpointResponse)
-	err := grpc.Invoke(ctx, "/types.API/DeleteCheckpoint", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *aPIClient) ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) {
-	out := new(ListCheckpointResponse)
-	err := grpc.Invoke(ctx, "/types.API/ListCheckpoint", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *aPIClient) State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) {
-	out := new(StateResponse)
-	err := grpc.Invoke(ctx, "/types.API/State", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *aPIClient) Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) {
-	stream, err := grpc.NewClientStream(ctx, &_API_serviceDesc.Streams[0], c.cc, "/types.API/Events", opts...)
-	if err != nil {
-		return nil, err
-	}
-	x := &aPIEventsClient{stream}
-	if err := x.ClientStream.SendMsg(in); err != nil {
-		return nil, err
-	}
-	if err := x.ClientStream.CloseSend(); err != nil {
-		return nil, err
-	}
-	return x, nil
-}
-
-type API_EventsClient interface {
-	Recv() (*Event, error)
-	grpc.ClientStream
-}
-
-type aPIEventsClient struct {
-	grpc.ClientStream
-}
-
-func (x *aPIEventsClient) Recv() (*Event, error) {
-	m := new(Event)
-	if err := x.ClientStream.RecvMsg(m); err != nil {
-		return nil, err
-	}
-	return m, nil
-}
-
-func (c *aPIClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) {
-	out := new(StatsResponse)
-	err := grpc.Invoke(ctx, "/types.API/Stats", in, out, c.cc, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// Server API for API service
-
-type APIServer interface {
-	GetServerVersion(context.Context, *GetServerVersionRequest) (*GetServerVersionResponse, error)
-	CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error)
-	UpdateContainer(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error)
-	Signal(context.Context, *SignalRequest) (*SignalResponse, error)
-	UpdateProcess(context.Context, *UpdateProcessRequest) (*UpdateProcessResponse, error)
-	AddProcess(context.Context, *AddProcessRequest) (*AddProcessResponse, error)
-	CreateCheckpoint(context.Context, *CreateCheckpointRequest) (*CreateCheckpointResponse, error)
-	DeleteCheckpoint(context.Context, *DeleteCheckpointRequest) (*DeleteCheckpointResponse, error)
-	ListCheckpoint(context.Context, *ListCheckpointRequest) (*ListCheckpointResponse, error)
-	State(context.Context, *StateRequest) (*StateResponse, error)
-	Events(*EventsRequest, API_EventsServer) error
-	Stats(context.Context, *StatsRequest) (*StatsResponse, error)
-}
-
-func RegisterAPIServer(s *grpc.Server, srv APIServer) {
-	s.RegisterService(&_API_serviceDesc, srv)
-}
-
-func _API_GetServerVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(GetServerVersionRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(APIServer).GetServerVersion(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/types.API/GetServerVersion",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(APIServer).GetServerVersion(ctx, req.(*GetServerVersionRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _API_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(CreateContainerRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(APIServer).CreateContainer(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/types.API/CreateContainer",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(APIServer).CreateContainer(ctx, req.(*CreateContainerRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _API_UpdateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(UpdateContainerRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(APIServer).UpdateContainer(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/types.API/UpdateContainer",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(APIServer).UpdateContainer(ctx, req.(*UpdateContainerRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(SignalRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(APIServer).Signal(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/types.API/Signal",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(APIServer).Signal(ctx, req.(*SignalRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(UpdateProcessRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(APIServer).UpdateProcess(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/types.API/UpdateProcess",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(APIServer).UpdateProcess(ctx, req.(*UpdateProcessRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(AddProcessRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(APIServer).AddProcess(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/types.API/AddProcess",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(APIServer).AddProcess(ctx, req.(*AddProcessRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _API_CreateCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(CreateCheckpointRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(APIServer).CreateCheckpoint(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/types.API/CreateCheckpoint",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(APIServer).CreateCheckpoint(ctx, req.(*CreateCheckpointRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _API_DeleteCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(DeleteCheckpointRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(APIServer).DeleteCheckpoint(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/types.API/DeleteCheckpoint",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(APIServer).DeleteCheckpoint(ctx, req.(*DeleteCheckpointRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _API_ListCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(ListCheckpointRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(APIServer).ListCheckpoint(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/types.API/ListCheckpoint",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(APIServer).ListCheckpoint(ctx, req.(*ListCheckpointRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _API_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(StateRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(APIServer).State(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/types.API/State",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(APIServer).State(ctx, req.(*StateRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-func _API_Events_Handler(srv interface{}, stream grpc.ServerStream) error {
-	m := new(EventsRequest)
-	if err := stream.RecvMsg(m); err != nil {
-		return err
-	}
-	return srv.(APIServer).Events(m, &aPIEventsServer{stream})
-}
-
-type API_EventsServer interface {
-	Send(*Event) error
-	grpc.ServerStream
-}
-
-type aPIEventsServer struct {
-	grpc.ServerStream
-}
-
-func (x *aPIEventsServer) Send(m *Event) error {
-	return x.ServerStream.SendMsg(m)
-}
-
-func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
-	in := new(StatsRequest)
-	if err := dec(in); err != nil {
-		return nil, err
-	}
-	if interceptor == nil {
-		return srv.(APIServer).Stats(ctx, in)
-	}
-	info := &grpc.UnaryServerInfo{
-		Server:     srv,
-		FullMethod: "/types.API/Stats",
-	}
-	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
-		return srv.(APIServer).Stats(ctx, req.(*StatsRequest))
-	}
-	return interceptor(ctx, in, info, handler)
-}
-
-var _API_serviceDesc = grpc.ServiceDesc{
-	ServiceName: "types.API",
-	HandlerType: (*APIServer)(nil),
-	Methods: []grpc.MethodDesc{
-		{
-			MethodName: "GetServerVersion",
-			Handler:    _API_GetServerVersion_Handler,
-		},
-		{
-			MethodName: "CreateContainer",
-			Handler:    _API_CreateContainer_Handler,
-		},
-		{
-			MethodName: "UpdateContainer",
-			Handler:    _API_UpdateContainer_Handler,
-		},
-		{
-			MethodName: "Signal",
-			Handler:    _API_Signal_Handler,
-		},
-		{
-			MethodName: "UpdateProcess",
-			Handler:    _API_UpdateProcess_Handler,
-		},
-		{
-			MethodName: "AddProcess",
-			Handler:    _API_AddProcess_Handler,
-		},
-		{
-			MethodName: "CreateCheckpoint",
-			Handler:    _API_CreateCheckpoint_Handler,
-		},
-		{
-			MethodName: "DeleteCheckpoint",
-			Handler:    _API_DeleteCheckpoint_Handler,
-		},
-		{
-			MethodName: "ListCheckpoint",
-			Handler:    _API_ListCheckpoint_Handler,
-		},
-		{
-			MethodName: "State",
-			Handler:    _API_State_Handler,
-		},
-		{
-			MethodName: "Stats",
-			Handler:    _API_Stats_Handler,
-		},
-	},
-	Streams: []grpc.StreamDesc{
-		{
-			StreamName:    "Events",
-			Handler:       _API_Events_Handler,
-			ServerStreams: true,
-		},
-	},
-	Metadata: "api.proto",
-}
-
-func init() { proto.RegisterFile("api.proto", fileDescriptor0) }
-
-var fileDescriptor0 = []byte{
-	// 2666 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x19, 0x4d, 0x6f, 0x24, 0x47,
-	0x75, 0x67, 0xa6, 0xed, 0xf1, 0xbc, 0xf9, 0xb0, 0xa7, 0xd6, 0xeb, 0xed, 0x9d, 0x24, 0xbb, 0x4e,
-	0x2b, 0x10, 0x03, 0x91, 0xb3, 0x78, 0x13, 0x58, 0x11, 0x09, 0x69, 0xd7, 0x1b, 0x82, 0xc9, 0x3a,
-	0x99, 0x94, 0x6d, 0x56, 0x48, 0x48, 0xa3, 0x76, 0x77, 0xed, 0x4c, 0xe1, 0x9e, 0xae, 0x4e, 0x75,
-	0xb5, 0x3d, 0xbe, 0xe4, 0xc0, 0x01, 0x6e, 0x70, 0x45, 0xe2, 0xc8, 0x8d, 0x3b, 0x07, 0xf8, 0x03,
-	0x48, 0xfc, 0x10, 0x24, 0x0e, 0xdc, 0x39, 0xa2, 0xfa, 0xe8, 0xee, 0xea, 0xf9, 0xf0, 0x6e, 0x90,
-	0x10, 0x17, 0x2e, 0xad, 0x7a, 0xaf, 0xde, 0x57, 0xbd, 0x7a, 0xef, 0xd5, 0xab, 0x2e, 0x68, 0xf9,
-	0x09, 0xdd, 0x4f, 0x38, 0x13, 0x0c, 0xad, 0x89, 0xeb, 0x84, 0xa4, 0x83, 0x07, 0x63, 0xc6, 0xc6,
-	0x11, 0x79, 0x5f, 0x21, 0xcf, 0xb3, 0x97, 0xef, 0x0b, 0x3a, 0x25, 0xa9, 0xf0, 0xa7, 0x89, 0xa6,
-	0xf3, 0xee, 0xc1, 0xdd, 0x4f, 0x88, 0x38, 0x21, 0xfc, 0x92, 0xf0, 0x9f, 0x12, 0x9e, 0x52, 0x16,
-	0x63, 0xf2, 0x65, 0x46, 0x52, 0xe1, 0xcd, 0xc0, 0x5d, 0x9c, 0x4a, 0x13, 0x16, 0xa7, 0x04, 0x6d,
-	0xc3, 0xda, 0xd4, 0xff, 0x05, 0xe3, 0x6e, 0x6d, 0xb7, 0xb6, 0xd7, 0xc5, 0x1a, 0x50, 0x58, 0x1a,
-	0x33, 0xee, 0xd6, 0x0d, 0x56, 0x02, 0x12, 0x9b, 0xf8, 0x22, 0x98, 0xb8, 0x0d, 0x8d, 0x55, 0x00,
-	0x1a, 0xc0, 0x06, 0x27, 0x97, 0x54, 0x4a, 0x75, 0x9d, 0xdd, 0xda, 0x5e, 0x0b, 0x17, 0xb0, 0xf7,
-	0xab, 0x1a, 0x6c, 0x9f, 0x25, 0xa1, 0x2f, 0xc8, 0x90, 0xb3, 0x80, 0xa4, 0xa9, 0x31, 0x09, 0xf5,
-	0xa0, 0x4e, 0x43, 0xa5, 0xb3, 0x85, 0xeb, 0x34, 0x44, 0x5b, 0xd0, 0x48, 0x68, 0xa8, 0xd4, 0xb5,
-	0xb0, 0x1c, 0xa2, 0xfb, 0x00, 0x41, 0xc4, 0x52, 0x72, 0x22, 0x42, 0x1a, 0x2b, 0x8d, 0x1b, 0xd8,
-	0xc2, 0x48, 0x63, 0xae, 0x68, 0x28, 0x26, 0x4a, 0x67, 0x17, 0x6b, 0x00, 0xed, 0xc0, 0xfa, 0x84,
-	0xd0, 0xf1, 0x44, 0xb8, 0x6b, 0x0a, 0x6d, 0x20, 0xef, 0x2e, 0xdc, 0x99, 0xb3, 0x43, 0xaf, 0xdf,
-	0xfb, 0x5b, 0x1d, 0x76, 0x0e, 0x39, 0xf1, 0x05, 0x39, 0x64, 0xb1, 0xf0, 0x69, 0x4c, 0xf8, 0x2a,
-	0x1b, 0xef, 0x03, 0x9c, 0x67, 0x71, 0x18, 0x91, 0xa1, 0x2f, 0x26, 0xc6, 0x54, 0x0b, 0xa3, 0x2c,
-	0x9e, 0x90, 0xe0, 0x22, 0x61, 0x34, 0x16, 0xca, 0xe2, 0x16, 0xb6, 0x30, 0xd2, 0xe2, 0x54, 0x2d,
-	0x46, 0x7b, 0x49, 0x03, 0xd2, 0xe2, 0x54, 0x84, 0x2c, 0xd3, 0x16, 0xb7, 0xb0, 0x81, 0x0c, 0x9e,
-	0x70, 0xee, 0xae, 0x17, 0x78, 0xc2, 0xb9, 0xc4, 0x47, 0xfe, 0x39, 0x89, 0x52, 0xb7, 0xb9, 0xdb,
-	0x90, 0x78, 0x0d, 0xa1, 0x5d, 0x68, 0xc7, 0x6c, 0x48, 0x2f, 0x99, 0xc0, 0x8c, 0x09, 0x77, 0x43,
-	0x39, 0xcc, 0x46, 0x21, 0x17, 0x9a, 0x3c, 0x8b, 0x65, 0xdc, 0xb8, 0x2d, 0x25, 0x32, 0x07, 0x25,
-	0xaf, 0x19, 0x3e, 0xe1, 0xe3, 0xd4, 0x05, 0x25, 0xd8, 0x46, 0xa1, 0x77, 0xa0, 0x5b, 0xae, 0xe4,
-	0x19, 0xe5, 0x6e, 0x5b, 0x49, 0xa8, 0x22, 0xbd, 0x23, 0xb8, 0xbb, 0xe0, 0x4b, 0x13, 0x67, 0xfb,
-	0xd0, 0x0a, 0x72, 0xa4, 0xf2, 0x69, 0xfb, 0x60, 0x6b, 0x5f, 0x85, 0xf6, 0x7e, 0x49, 0x5c, 0x92,
-	0x78, 0x47, 0xd0, 0x3d, 0xa1, 0xe3, 0xd8, 0x8f, 0x5e, 0x3f, 0x62, 0xa4, 0xc7, 0x14, 0x8b, 0x89,
-	0x4f, 0x03, 0x79, 0x5b, 0xd0, 0xcb, 0x45, 0x99, 0x4d, 0xff, 0x53, 0x03, 0xfa, 0x4f, 0xc2, 0xf0,
-	0x15, 0x31, 0x39, 0x80, 0x0d, 0x41, 0xf8, 0x94, 0x4a, 0x89, 0x75, 0xe5, 0xce, 0x02, 0x46, 0x0f,
-	0xc0, 0xc9, 0x52, 0xc2, 0x95, 0xa6, 0xf6, 0x41, 0xdb, 0xac, 0xe4, 0x2c, 0x25, 0x1c, 0xab, 0x09,
-	0x84, 0xc0, 0xf1, 0xa5, 0x2f, 0x1d, 0xe5, 0x4b, 0x35, 0x96, 0x26, 0x93, 0xf8, 0xd2, 0x5d, 0x53,
-	0x28, 0x39, 0x94, 0x98, 0xe0, 0x2a, 0x34, 0x3b, 0x2c, 0x87, 0xf9, 0xb2, 0x9a, 0xe5, 0xb2, 0x8a,
-	0xb0, 0xd9, 0x58, 0x1e, 0x36, 0xad, 0x15, 0x61, 0x03, 0x95, 0xb0, 0xf1, 0xa0, 0x13, 0xf8, 0x89,
-	0x7f, 0x4e, 0x23, 0x2a, 0x28, 0x49, 0xdd, 0xb6, 0x32, 0xa2, 0x82, 0x43, 0x7b, 0xb0, 0xe9, 0x27,
-	0x89, 0xcf, 0xa7, 0x8c, 0x0f, 0x39, 0x7b, 0x49, 0x23, 0xe2, 0x76, 0x94, 0x90, 0x79, 0xb4, 0x94,
-	0x96, 0x92, 0x88, 0xc6, 0xd9, 0xec, 0xb9, 0x8c, 0x3e, 0xb7, 0xab, 0xc8, 0x2a, 0x38, 0x29, 0x2d,
-	0x66, 0x9f, 0x91, 0xab, 0x21, 0xa7, 0x97, 0x34, 0x22, 0x63, 0x92, 0xba, 0x3d, 0xe5, 0xc5, 0x79,
-	0x34, 0x7a, 0x17, 0x9a, 0x3c, 0xa2, 0x53, 0x2a, 0x52, 0x77, 0x73, 0xb7, 0xb1, 0xd7, 0x3e, 0xe8,
-	0x1a, 0x7f, 0x62, 0x85, 0xc5, 0xf9, 0xac, 0xf7, 0x0c, 0xd6, 0x35, 0x4a, 0xba, 0x57, 0x92, 0x98,
-	0xdd, 0x52, 0x63, 0x89, 0x4b, 0xd9, 0x4b, 0xa1, 0xf6, 0xca, 0xc1, 0x6a, 0x2c, 0x71, 0x13, 0x9f,
-	0x87, 0x6a, 0x9f, 0x1c, 0xac, 0xc6, 0x1e, 0x06, 0x47, 0x6e, 0x94, 0x74, 0x75, 0x66, 0x36, 0xbc,
-	0x8b, 0xe5, 0x50, 0x62, 0xc6, 0x26, 0xa6, 0xba, 0x58, 0x0e, 0xd1, 0x37, 0xa1, 0xe7, 0x87, 0x21,
-	0x15, 0x94, 0xc5, 0x7e, 0xf4, 0x09, 0x0d, 0x53, 0xb7, 0xb1, 0xdb, 0xd8, 0xeb, 0xe2, 0x39, 0xac,
-	0x77, 0x00, 0xc8, 0x0e, 0x28, 0x13, 0xf4, 0x6f, 0x42, 0x2b, 0xbd, 0x4e, 0x05, 0x99, 0x0e, 0x0b,
-	0x3d, 0x25, 0xc2, 0xfb, 0x65, 0xad, 0x48, 0x97, 0x22, 0x8b, 0x56, 0xc5, 0xe2, 0x77, 0x2b, 0xb5,
-	0xa5, 0xae, 0xa2, 0xae, 0x9f, 0xe7, 0x4f, 0xc9, 0x6d, 0x97, 0x9b, 0x85, 0x94, 0x6d, 0x2c, 0x4b,
-	0xd9, 0x01, 0xb8, 0x8b, 0x36, 0x98, 0x34, 0x09, 0xe0, 0xee, 0x33, 0x12, 0x91, 0xd7, 0xb1, 0x0f,
-	0x81, 0x13, 0xfb, 0x53, 0x62, 0xd2, 0x51, 0x8d, 0x5f, 0xdf, 0x80, 0x45, 0x25, 0xc6, 0x80, 0x63,
-	0xb8, 0xf3, 0x9c, 0xa6, 0xe2, 0xd5, 0xea, 0x17, 0x54, 0xd5, 0x97, 0xa9, 0xfa, 0x5d, 0x0d, 0xa0,
-	0x94, 0x55, 0xd8, 0x5c, 0xb3, 0x6c, 0x46, 0xe0, 0x90, 0x19, 0x15, 0x26, 0xdf, 0xd5, 0x58, 0x46,
-	0x85, 0x08, 0x12, 0x73, 0x04, 0xc9, 0xa1, 0xac, 0x97, 0x59, 0x4c, 0x67, 0x27, 0x2c, 0xb8, 0x20,
-	0x22, 0x55, 0xf5, 0x7c, 0x03, 0xdb, 0x28, 0x95, 0xb4, 0x13, 0x12, 0x45, 0xaa, 0xa8, 0x6f, 0x60,
-	0x0d, 0xc8, 0x0a, 0x4c, 0xa6, 0x89, 0xb8, 0xfe, 0xec, 0xc4, 0x5d, 0x57, 0xf9, 0x97, 0x83, 0xde,
-	0x31, 0xec, 0xcc, 0xaf, 0xd4, 0xc4, 0xd0, 0x23, 0x68, 0x97, 0xab, 0x48, 0xdd, 0x9a, 0x4a, 0x90,
-	0x25, 0x5b, 0x6f, 0x53, 0x79, 0xf7, 0xa1, 0x73, 0x22, 0x7c, 0x41, 0x56, 0xf8, 0xcb, 0xdb, 0x83,
-	0x5e, 0x51, 0x75, 0x15, 0xa1, 0xae, 0x1b, 0xbe, 0xc8, 0x52, 0x43, 0x65, 0x20, 0xef, 0xcf, 0x0d,
-	0x68, 0x9a, 0xb0, 0xce, 0x6b, 0x53, 0xad, 0xac, 0x4d, 0xff, 0x93, 0x12, 0x59, 0xc9, 0xaa, 0xe6,
-	0x5c, 0x56, 0xfd, 0xbf, 0x5c, 0x96, 0xe5, 0xf2, 0xaf, 0x35, 0x68, 0x15, 0xdb, 0xfc, 0xb5, 0xdb,
-	0x99, 0xf7, 0xa0, 0x95, 0xe8, 0x8d, 0x27, 0xba, 0xea, 0xb5, 0x0f, 0x7a, 0x46, 0x51, 0x5e, 0xe7,
-	0x4a, 0x02, 0x2b, 0x7e, 0x1c, 0x3b, 0x7e, 0xac, 0x76, 0x65, 0xad, 0xd2, 0xae, 0x20, 0x70, 0x12,
-	0x59, 0x4e, 0xd7, 0x55, 0x39, 0x55, 0x63, 0xbb, 0x41, 0x69, 0x56, 0x1a, 0x14, 0xef, 0x43, 0x68,
-	0x1e, 0xfb, 0xc1, 0x84, 0xc6, 0x2a, 0x43, 0x83, 0xc4, 0x84, 0x69, 0x17, 0xab, 0xb1, 0x54, 0x32,
-	0x25, 0x53, 0xc6, 0xaf, 0x4d, 0xed, 0x37, 0x90, 0x77, 0x01, 0x5d, 0x93, 0x06, 0x26, 0x99, 0x1e,
-	0x02, 0x14, 0x2d, 0x46, 0x9e, 0x4b, 0x8b, 0x6d, 0x88, 0x45, 0x83, 0xf6, 0xa0, 0x39, 0xd5, 0x9a,
-	0x4d, 0xd5, 0xcd, 0x7d, 0x60, 0xec, 0xc1, 0xf9, 0xb4, 0xf7, 0xeb, 0x1a, 0xec, 0xe8, 0x1e, 0xf3,
-	0x95, 0x9d, 0xe4, 0xf2, 0xde, 0x45, 0xbb, 0xaf, 0x51, 0x71, 0xdf, 0x23, 0x68, 0x71, 0x92, 0xb2,
-	0x8c, 0x07, 0x44, 0x7b, 0xb6, 0x7d, 0x70, 0x27, 0xcf, 0x24, 0xa5, 0x0b, 0x9b, 0x59, 0x5c, 0xd2,
-	0x79, 0xff, 0x68, 0x42, 0xaf, 0x3a, 0x2b, 0x2b, 0xd6, 0x79, 0x74, 0x41, 0xd9, 0x0b, 0xdd, 0x1c,
-	0xd7, 0x94, 0x9b, 0x6c, 0x94, 0xcc, 0xaa, 0x20, 0xc9, 0x4e, 0x26, 0x3e, 0x27, 0xa9, 0x71, 0x63,
-	0x89, 0x30, 0xb3, 0x43, 0xc2, 0x29, 0xcb, 0x0f, 0xd3, 0x12, 0x21, 0xcb, 0x40, 0x90, 0x64, 0x5f,
-	0x64, 0x4c, 0xf8, 0xca, 0x48, 0x07, 0x17, 0xb0, 0xea, 0x8a, 0x93, 0x2c, 0x25, 0xe2, 0x50, 0xee,
-	0xda, 0x9a, 0xe9, 0x8a, 0x0b, 0x4c, 0x39, 0x7f, 0x4c, 0xa6, 0xa9, 0x49, 0x73, 0x0b, 0x23, 0x2d,
-	0xd7, 0xbb, 0xf9, 0x5c, 0x06, 0xb5, 0x0a, 0x0c, 0x07, 0xdb, 0x28, 0x29, 0x41, 0x83, 0x27, 0x57,
-	0x7e, 0xa2, 0xd2, 0xde, 0xc1, 0x16, 0x06, 0xbd, 0x07, 0x7d, 0x0d, 0x61, 0x92, 0x12, 0x7e, 0xe9,
-	0xcb, 0x63, 0x5b, 0x95, 0x01, 0x07, 0x2f, 0x4e, 0x48, 0xea, 0x0b, 0xc2, 0x63, 0x12, 0x1d, 0x5b,
-	0x5a, 0x41, 0x53, 0x2f, 0x4c, 0xa0, 0x03, 0xd8, 0xd6, 0xc8, 0xd3, 0xc3, 0xa1, 0xcd, 0xd0, 0x56,
-	0x0c, 0x4b, 0xe7, 0x64, 0xa6, 0x2b, 0xc7, 0x3f, 0x27, 0xfe, 0x4b, 0xb3, 0x1f, 0x1d, 0x45, 0x3e,
-	0x8f, 0x46, 0x4f, 0xa0, 0x6f, 0x6d, 0xd1, 0x33, 0x72, 0x49, 0x03, 0xe2, 0x76, 0x55, 0xd4, 0xde,
-	0x36, 0x51, 0x60, 0x4f, 0xe1, 0x45, 0x6a, 0x74, 0x06, 0x03, 0x85, 0x3c, 0x9d, 0x70, 0x26, 0x44,
-	0x44, 0x30, 0xf1, 0xc3, 0xa7, 0x49, 0x6a, 0x64, 0xf5, 0x94, 0xac, 0x3c, 0xa2, 0x72, 0x1a, 0x23,
-	0xed, 0x06, 0x46, 0xf4, 0x02, 0xde, 0xa8, 0xcc, 0xbe, 0xe0, 0x54, 0x90, 0x52, 0xee, 0xe6, 0x4d,
-	0x72, 0x6f, 0xe2, 0x5c, 0x10, 0x2c, 0xd5, 0x1e, 0xb1, 0x42, 0xf0, 0xd6, 0xeb, 0x0b, 0xae, 0x72,
-	0xa2, 0x9f, 0xc1, 0x9b, 0x8b, 0x7a, 0x2d, 0xc9, 0xfd, 0x9b, 0x24, 0xdf, 0xc8, 0x2a, 0x93, 0x43,
-	0xd6, 0x2f, 0xbd, 0xf3, 0x48, 0x27, 0x47, 0x81, 0x90, 0x01, 0x15, 0x24, 0x19, 0x26, 0x7e, 0x24,
-	0x4b, 0x99, 0x49, 0xa1, 0xdb, 0x3a, 0xa0, 0x16, 0x26, 0xd0, 0x3e, 0x20, 0x0b, 0x89, 0x4d, 0x39,
-	0xdc, 0xde, 0xad, 0xed, 0x35, 0xf0, 0x92, 0x19, 0xef, 0x23, 0xe8, 0x3e, 0x8d, 0x58, 0x70, 0x71,
-	0xf4, 0xb9, 0x31, 0xa6, 0x72, 0xa1, 0x6f, 0x2c, 0xbd, 0xd0, 0x37, 0xcc, 0x85, 0xde, 0xfb, 0x0a,
-	0x3a, 0x95, 0x60, 0xf9, 0x9e, 0xaa, 0x12, 0xb9, 0x28, 0x73, 0x4d, 0xdb, 0x36, 0x2e, 0xa9, 0xa8,
-	0xc1, 0x36, 0xa1, 0xac, 0x5e, 0x57, 0x3a, 0x90, 0x75, 0xeb, 0x6c, 0x20, 0x99, 0x99, 0x51, 0x19,
-	0xe4, 0xfa, 0x56, 0x66, 0x61, 0xbc, 0x9f, 0x43, 0xaf, 0xea, 0xe8, 0xff, 0xd8, 0x02, 0x04, 0x0e,
-	0xf7, 0x05, 0xc9, 0x7b, 0x7f, 0x39, 0xf6, 0xee, 0xc1, 0xdd, 0x85, 0x7a, 0x6c, 0x1a, 0xcb, 0x6b,
-	0xe8, 0x7e, 0x7c, 0x49, 0x62, 0x51, 0xdc, 0xfd, 0x1e, 0x43, 0xab, 0xf8, 0xa1, 0x62, 0x0a, 0xfd,
-	0x60, 0x5f, 0xff, 0x72, 0xd9, 0xcf, 0x7f, 0xb9, 0xec, 0x9f, 0xe6, 0x14, 0xb8, 0x24, 0x96, 0x6b,
-	0x4c, 0x05, 0xe3, 0x24, 0xfc, 0x3c, 0x8e, 0xae, 0xf3, 0xff, 0x14, 0x25, 0xc6, 0xd4, 0x7e, 0xa7,
-	0x68, 0xbd, 0x7e, 0x5b, 0x83, 0x35, 0xa5, 0x7b, 0xe9, 0x1d, 0x46, 0x53, 0xd7, 0x8b, 0x93, 0xa2,
-	0x7a, 0x2e, 0x74, 0x8b, 0x73, 0xc1, 0x9c, 0x20, 0x4e, 0x79, 0x82, 0x54, 0x56, 0xb0, 0xfe, 0x35,
-	0x56, 0xe0, 0xfd, 0xa6, 0x0e, 0x9d, 0xcf, 0x88, 0xb8, 0x62, 0xfc, 0x42, 0x9e, 0x96, 0xe9, 0xd2,
-	0xc6, 0xf8, 0x1e, 0x6c, 0xf0, 0xd9, 0xe8, 0xfc, 0x5a, 0x14, 0xa7, 0x43, 0x93, 0xcf, 0x9e, 0x4a,
-	0x10, 0xbd, 0x05, 0xc0, 0x67, 0xa3, 0xa1, 0xaf, 0x9b, 0x61, 0x73, 0x38, 0xf0, 0x99, 0x41, 0xa0,
-	0x37, 0xa0, 0x85, 0x67, 0x23, 0xc2, 0x39, 0xe3, 0x69, 0x7e, 0x3a, 0xe0, 0xd9, 0xc7, 0x0a, 0x96,
-	0xbc, 0x78, 0x36, 0x0a, 0x39, 0x4b, 0x12, 0x12, 0xaa, 0xd3, 0xc1, 0xc1, 0x2d, 0x3c, 0x7b, 0xa6,
-	0x11, 0x52, 0xeb, 0x69, 0xae, 0x75, 0x5d, 0x6b, 0x3d, 0x2d, 0xb5, 0x9e, 0xce, 0x46, 0x89, 0xd1,
-	0xaa, 0x8f, 0x85, 0xd6, 0xa9, 0xad, 0xf5, 0xb4, 0xd0, 0xaa, 0xcf, 0x84, 0x8d, 0x53, 0x4b, 0xeb,
-	0x69, 0xa9, 0xb5, 0x95, 0xf3, 0x1a, 0xad, 0xde, 0x1f, 0x6b, 0xb0, 0x71, 0x98, 0x64, 0x67, 0xa9,
-	0x3f, 0x26, 0xe8, 0x01, 0xb4, 0x05, 0x13, 0x7e, 0x34, 0xca, 0x24, 0x68, 0x4e, 0x4e, 0x50, 0x28,
-	0x4d, 0xf0, 0x36, 0x74, 0x12, 0xc2, 0x83, 0x24, 0x33, 0x14, 0xf5, 0xdd, 0x86, 0x3c, 0xa1, 0x34,
-	0x4e, 0x93, 0xec, 0xc3, 0x6d, 0x35, 0x37, 0xa2, 0xf1, 0x48, 0x1f, 0x09, 0x53, 0x16, 0x12, 0xe3,
-	0xaa, 0xbe, 0x9a, 0x3a, 0x8a, 0x3f, 0x2d, 0x26, 0xd0, 0xb7, 0xa1, 0x5f, 0xd0, 0xcb, 0x56, 0x59,
-	0x51, 0x6b, 0xd7, 0x6d, 0x1a, 0xea, 0x33, 0x83, 0xf6, 0xbe, 0x2a, 0x72, 0x88, 0xc6, 0xe3, 0x67,
-	0xbe, 0xf0, 0x65, 0x1b, 0x95, 0xa8, 0x62, 0x92, 0x1a, 0x6b, 0x73, 0x10, 0x7d, 0x07, 0xfa, 0xc2,
-	0xe4, 0x5b, 0x38, 0xca, 0x69, 0xf4, 0x6e, 0x6e, 0x15, 0x13, 0x43, 0x43, 0xfc, 0x0d, 0xe8, 0x95,
-	0xc4, 0xaa, 0x0a, 0x69, 0x7b, 0xbb, 0x05, 0x56, 0x46, 0x93, 0xf7, 0x7b, 0xed, 0x2c, 0x1d, 0x39,
-	0xef, 0xa9, 0x36, 0xc1, 0x72, 0x55, 0xfb, 0x60, 0x33, 0x6f, 0xaf, 0x8c, 0x33, 0x54, 0x6b, 0xa0,
-	0xdd, 0xf2, 0x43, 0xd8, 0x14, 0x85, 0xe9, 0xa3, 0xd0, 0x17, 0xbe, 0x49, 0xbd, 0xb9, 0x2a, 0x6c,
-	0x16, 0x86, 0x7b, 0xa2, 0xba, 0xd0, 0xb7, 0xa1, 0xa3, 0xfb, 0x7e, 0xa3, 0x50, 0xdb, 0xd7, 0xd6,
-	0x38, 0xa5, 0xc2, 0xfb, 0x08, 0x5a, 0x43, 0x1a, 0xa6, 0xda, 0x3a, 0x17, 0x9a, 0x41, 0xc6, 0x39,
-	0x89, 0xf3, 0x06, 0x28, 0x07, 0x65, 0x79, 0x54, 0x3d, 0xb3, 0x71, 0x86, 0x06, 0x3c, 0x06, 0xa0,
-	0xcf, 0x6d, 0xa5, 0x6d, 0x1b, 0xd6, 0xec, 0x10, 0xd0, 0x80, 0x8c, 0xb3, 0xa9, 0x3f, 0x2b, 0xb6,
-	0x5e, 0xc5, 0xd9, 0xd4, 0x9f, 0xe9, 0x05, 0xba, 0xd0, 0x7c, 0xe9, 0xd3, 0x28, 0x30, 0xbf, 0x03,
-	0x1d, 0x9c, 0x83, 0xa5, 0x42, 0xc7, 0x56, 0xf8, 0x87, 0x3a, 0xb4, 0xb5, 0x46, 0x6d, 0xf0, 0x36,
-	0xac, 0x05, 0x7e, 0x30, 0x29, 0x54, 0x2a, 0x00, 0xbd, 0x9b, 0x1b, 0x52, 0xfd, 0x0d, 0x50, 0x9a,
-	0x9a, 0xdb, 0xf6, 0x10, 0x20, 0xbd, 0xf2, 0x13, 0xcb, 0x3b, 0x4b, 0xa9, 0x5b, 0x92, 0x48, 0x1b,
-	0xfc, 0x01, 0x74, 0x74, 0x7c, 0x1a, 0x1e, 0x67, 0x15, 0x4f, 0x5b, 0x93, 0x69, 0xae, 0x47, 0xf2,
-	0xca, 0xe5, 0x0b, 0xdd, 0xe2, 0xb7, 0x0f, 0xde, 0xaa, 0x90, 0xab, 0x95, 0xec, 0xab, 0xef, 0xc7,
-	0xb1, 0xe0, 0xd7, 0x58, 0xd3, 0x0e, 0x1e, 0x03, 0x94, 0x48, 0x59, 0xcf, 0x2e, 0xc8, 0x75, 0x7e,
-	0xb5, 0xbc, 0x20, 0xd7, 0x72, 0xed, 0x97, 0x7e, 0x94, 0xe5, 0x4e, 0xd5, 0xc0, 0x0f, 0xea, 0x8f,
-	0x6b, 0x5e, 0x00, 0x9b, 0x4f, 0xe5, 0x71, 0x6c, 0xb1, 0x57, 0x0e, 0x3d, 0x67, 0xe9, 0xa1, 0xe7,
-	0xe4, 0x7f, 0xb1, 0x7b, 0x50, 0x67, 0x89, 0x69, 0xb3, 0xeb, 0x2c, 0x29, 0x15, 0x39, 0x96, 0x22,
-	0xef, 0xef, 0x0e, 0x40, 0xa9, 0x05, 0x9d, 0xc0, 0x80, 0xb2, 0x91, 0xec, 0x12, 0x69, 0x40, 0x74,
-	0x41, 0x1a, 0x71, 0x12, 0x64, 0x3c, 0xa5, 0x97, 0xc4, 0x5c, 0x24, 0x76, 0x8a, 0x63, 0xaa, 0x62,
-	0x1c, 0xbe, 0x4b, 0xd9, 0x89, 0x66, 0x54, 0x95, 0x0b, 0xe7, 0x6c, 0xe8, 0x27, 0x70, 0xa7, 0x14,
-	0x1a, 0x5a, 0xf2, 0xea, 0x37, 0xca, 0xbb, 0x5d, 0xc8, 0x0b, 0x4b, 0x59, 0x3f, 0x82, 0xdb, 0x94,
-	0x8d, 0xbe, 0xcc, 0x48, 0x56, 0x91, 0xd4, 0xb8, 0x51, 0x52, 0x9f, 0xb2, 0x2f, 0x14, 0x47, 0x29,
-	0xe7, 0x0b, 0xb8, 0x67, 0x2d, 0x54, 0xa6, 0xbd, 0x25, 0xcd, 0xb9, 0x51, 0xda, 0x4e, 0x61, 0x97,
-	0x2c, 0x0c, 0xa5, 0xc8, 0x4f, 0x61, 0x87, 0xb2, 0xd1, 0x95, 0x4f, 0xc5, 0xbc, 0xbc, 0xb5, 0x57,
-	0xad, 0xf3, 0x85, 0x4f, 0x45, 0x55, 0x98, 0x5e, 0xe7, 0x94, 0xf0, 0x71, 0x65, 0x9d, 0xeb, 0xaf,
-	0x5a, 0xe7, 0xb1, 0xe2, 0x28, 0xe5, 0x3c, 0x85, 0x3e, 0x65, 0xf3, 0xf6, 0x34, 0x6f, 0x94, 0xb2,
-	0x49, 0x59, 0xd5, 0x96, 0x43, 0xe8, 0xa7, 0x24, 0x10, 0x8c, 0xdb, 0xb1, 0xb0, 0x71, 0xa3, 0x8c,
-	0x2d, 0xc3, 0x50, 0x08, 0xf1, 0xbe, 0x84, 0xce, 0x8f, 0xb3, 0x31, 0x11, 0xd1, 0x79, 0x91, 0xf3,
-	0xff, 0xed, 0x32, 0xf3, 0xaf, 0x3a, 0xb4, 0x0f, 0xc7, 0x9c, 0x65, 0x49, 0xa5, 0x6a, 0xeb, 0x1c,
-	0x5e, 0xa8, 0xda, 0x8a, 0x46, 0x55, 0x6d, 0x4d, 0xfd, 0x21, 0x74, 0xf4, 0xad, 0xc9, 0x30, 0xe8,
-	0x2a, 0x84, 0x16, 0x93, 0x3e, 0xbf, 0xa5, 0x69, 0xb6, 0x03, 0x73, 0x03, 0x35, 0x5c, 0xd5, 0x6a,
-	0x54, 0xba, 0x09, 0xc3, 0x79, 0x99, 0x75, 0x47, 0xd0, 0x9d, 0x68, 0xdf, 0x18, 0x2e, 0x1d, 0x80,
-	0xef, 0xe4, 0xc6, 0x95, 0x6b, 0xd8, 0xb7, 0x7d, 0xa8, 0x5d, 0xdd, 0x99, 0xd8, 0x6e, 0x7d, 0x1f,
-	0x40, 0xb6, 0xe4, 0xa3, 0xbc, 0x50, 0xd9, 0x0f, 0x10, 0xc5, 0x09, 0xa1, 0xdb, 0x76, 0x35, 0x1c,
-	0x9c, 0x42, 0x7f, 0x41, 0xe6, 0x92, 0x32, 0xf5, 0x2d, 0xbb, 0x4c, 0x95, 0xd7, 0x32, 0x9b, 0xd5,
-	0xae, 0x5d, 0x7f, 0xa9, 0xe9, 0x5f, 0x12, 0xe5, 0x3f, 0xe2, 0xc7, 0xd0, 0x8d, 0x75, 0xf3, 0x55,
-	0x6c, 0x80, 0x7d, 0xbf, 0xb3, 0x1b, 0x33, 0xdc, 0x89, 0xed, 0x36, 0xed, 0x43, 0xe8, 0x04, 0xca,
-	0x03, 0x4b, 0x37, 0xc2, 0x72, 0x0e, 0x6e, 0x07, 0xd6, 0x6e, 0x57, 0x1a, 0x45, 0xe7, 0xeb, 0x34,
-	0x8a, 0xe6, 0xaf, 0xe2, 0xaa, 0x07, 0x93, 0x83, 0x7f, 0xae, 0x43, 0xe3, 0xc9, 0xf0, 0x08, 0x9d,
-	0xc1, 0xd6, 0xfc, 0x7b, 0x23, 0xba, 0x6f, 0xcc, 0x5a, 0xf1, 0x46, 0x39, 0x78, 0xb0, 0x72, 0xde,
-	0xb4, 0xec, 0xb7, 0x10, 0x86, 0xcd, 0xb9, 0xd7, 0x25, 0x94, 0x1f, 0x35, 0xcb, 0x5f, 0xf0, 0x06,
-	0xf7, 0x57, 0x4d, 0xdb, 0x32, 0xe7, 0xee, 0x08, 0x85, 0xcc, 0xe5, 0xff, 0x72, 0x0a, 0x99, 0xab,
-	0xae, 0x16, 0xb7, 0xd0, 0xf7, 0x61, 0x5d, 0xbf, 0x37, 0xa1, 0xfc, 0xe2, 0x52, 0x79, 0xc9, 0x1a,
-	0xdc, 0x99, 0xc3, 0x16, 0x8c, 0xcf, 0xa1, 0x5b, 0x79, 0xa4, 0x44, 0x6f, 0x54, 0x74, 0x55, 0x9f,
-	0xab, 0x06, 0x6f, 0x2e, 0x9f, 0x2c, 0xa4, 0x1d, 0x02, 0x94, 0x4f, 0x12, 0xc8, 0x35, 0xd4, 0x0b,
-	0xcf, 0x5e, 0x83, 0x7b, 0x4b, 0x66, 0x0a, 0x21, 0x67, 0xb0, 0x35, 0xff, 0x3c, 0x80, 0xe6, 0xbc,
-	0x3a, 0xff, 0x73, 0xbe, 0xd8, 0xca, 0x95, 0xef, 0x0a, 0x4a, 0xec, 0xfc, 0x4f, 0xff, 0x42, 0xec,
-	0x8a, 0x27, 0x87, 0x42, 0xec, 0xca, 0xd7, 0x82, 0x5b, 0xe8, 0x73, 0xe8, 0x55, 0xff, 0xa2, 0xa3,
-	0xdc, 0x49, 0x4b, 0x9f, 0x11, 0x06, 0x6f, 0xad, 0x98, 0x2d, 0x04, 0x7e, 0x00, 0x6b, 0xfa, 0xf7,
-	0x78, 0x9e, 0x8e, 0xf6, 0x5f, 0xf5, 0xc1, 0x76, 0x15, 0x59, 0x70, 0x3d, 0x84, 0x75, 0x7d, 0xbb,
-	0x2c, 0x02, 0xa0, 0x72, 0xd9, 0x1c, 0x74, 0x6c, 0xac, 0x77, 0xeb, 0x61, 0x2d, 0xd7, 0x93, 0x56,
-	0xf4, 0xa4, 0xcb, 0xf4, 0x58, 0x9b, 0x73, 0xbe, 0xae, 0xd2, 0xf5, 0xd1, 0xbf, 0x03, 0x00, 0x00,
-	0xff, 0xff, 0x8c, 0xbd, 0xc2, 0x0b, 0x2e, 0x20, 0x00, 0x00,
-}
diff --git a/vendor/github.com/containerd/containerd/api/grpc/types/api.proto b/vendor/github.com/containerd/containerd/api/grpc/types/api.proto
deleted file mode 100644
index e9a6d10..0000000
--- a/vendor/github.com/containerd/containerd/api/grpc/types/api.proto
+++ /dev/null
@@ -1,346 +0,0 @@
-syntax = "proto3";
-
-package types;
-
-import "google/protobuf/timestamp.proto";
-
-service API {
-	rpc GetServerVersion(GetServerVersionRequest) returns (GetServerVersionResponse) {}
-	rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse) {}
-	rpc UpdateContainer(UpdateContainerRequest) returns (UpdateContainerResponse) {}
-	rpc Signal(SignalRequest) returns (SignalResponse) {}
-	rpc UpdateProcess(UpdateProcessRequest) returns (UpdateProcessResponse) {}
-	rpc AddProcess(AddProcessRequest) returns (AddProcessResponse) {}
-	rpc CreateCheckpoint(CreateCheckpointRequest) returns (CreateCheckpointResponse) {}
-	rpc DeleteCheckpoint(DeleteCheckpointRequest) returns (DeleteCheckpointResponse) {}
-	rpc ListCheckpoint(ListCheckpointRequest) returns (ListCheckpointResponse) {}
-	rpc State(StateRequest) returns (StateResponse) {}
-	rpc Events(EventsRequest) returns (stream Event) {}
-	rpc Stats(StatsRequest) returns (StatsResponse) {}
-}
-
-message GetServerVersionRequest {
-}
-
-message GetServerVersionResponse {
-	uint32 major = 1;
-	uint32 minor = 2;
-	uint32 patch = 3;
-	string revision = 4;
-}
-
-message UpdateProcessRequest {
-	string id = 1;
-	string pid = 2;
-	bool closeStdin = 3; // Close stdin of the container
-	uint32 width = 4;
-	uint32 height = 5;
-}
-
-message UpdateProcessResponse {
-}
-
-message CreateContainerRequest {
-	string id = 1; // ID of container
-	string bundlePath = 2; // path to OCI bundle
-	string checkpoint = 3; // checkpoint name if you want to create immediate checkpoint (optional)
-	string stdin = 4; // path to the file where stdin will be read (optional)
-	string stdout = 5; // path to file where stdout will be written (optional)
-	string stderr = 6; // path to file where stderr will be written (optional)
-	repeated string labels = 7;
-	bool noPivotRoot = 8;
-	string runtime = 9;
-	repeated string runtimeArgs = 10;
-	string checkpointDir = 11; // Directory where checkpoints are stored
-}
-
-message CreateContainerResponse {
-	Container container = 1;
-}
-
-message SignalRequest {
-	string id = 1; // ID of container
-	string pid = 2; // PID of process inside container
-	uint32 signal = 3; // Signal which will be sent, you can find value in "man 7 signal"
-}
-
-message SignalResponse {
-}
-
-message AddProcessRequest {
-	string id = 1; // ID of container
-	bool terminal = 2; // Use tty for container stdio
-	User user = 3; // User under which process will be run
-	repeated string args = 4; // Arguments for process, first is binary path itself
-	repeated string env = 5; // List of environment variables for process
-	string cwd = 6; // Working directory of process
-	string pid = 7; // Process ID
-	string stdin = 8; // path to the file where stdin will be read (optional)
-	string stdout = 9; // path to file where stdout will be written (optional)
-	string stderr = 10; // path to file where stderr will be written (optional)
-	repeated string capabilities = 11;
-	string apparmorProfile = 12;
-	string selinuxLabel = 13;
-	bool noNewPrivileges = 14;
-	repeated Rlimit rlimits = 15;
-}
-
-message Rlimit {
-	string type = 1;
-	uint64 soft = 2;
-	uint64 hard = 3;
-}
-
-message User {
-	uint32 uid = 1; // UID of user
-	uint32 gid = 2; // GID of user
-	repeated uint32 additionalGids = 3; // Additional groups to which user will be added
-}
-
-message AddProcessResponse {
-	uint32 systemPid = 1;
-}
-
-message CreateCheckpointRequest {
-	string id = 1; // ID of container
-	Checkpoint checkpoint = 2; // Checkpoint configuration
-	string checkpointDir = 3; // Directory where checkpoints are stored
-}
-
-message CreateCheckpointResponse {
-}
-
-message DeleteCheckpointRequest {
-	string id = 1; // ID of container
-	string name = 2; // Name of checkpoint
-	string checkpointDir = 3; // Directory where checkpoints are stored
-}
-
-message DeleteCheckpointResponse {
-}
-
-message ListCheckpointRequest {
-	string id = 1; // ID of container
-	string checkpointDir = 2; // Directory where checkpoints are stored
-}
-
-message Checkpoint {
-	string name = 1; // Name of checkpoint
-	bool exit = 2; // checkpoint configuration: should container exit on checkpoint or not
-	bool tcp = 3; // allow open tcp connections
-	bool unixSockets = 4; // allow external unix sockets
-	bool shell = 5; // allow shell-jobs
-	repeated string emptyNS = 6;
-}
-
-message ListCheckpointResponse {
-	repeated Checkpoint checkpoints = 1; // List of checkpoints
-}
-
-message StateRequest {
-	string id = 1; // container id for a single container
-}
-
-message ContainerState {
-	string status = 1;
-}
-
-message Process {
-	string pid = 1;
-	bool terminal = 2; // Use tty for container stdio
-	User user = 3; // User under which process will be run
-	repeated string args = 4; // Arguments for process, first is binary path itself
-	repeated string env = 5; // List of environment variables for process
-	string cwd = 6; // Working directory of process
-	uint32 systemPid = 7;
-	string stdin = 8; // path to the file where stdin will be read (optional)
-	string stdout = 9; // path to file where stdout will be written (optional)
-	string stderr = 10; // path to file where stderr will be written (optional)
-	repeated string capabilities = 11;
-	string apparmorProfile = 12;
-	string selinuxLabel = 13;
-	bool noNewPrivileges = 14;
-	repeated Rlimit rlimits = 15;
-}
-
-message Container {
-	string id = 1; // ID of container
-	string bundlePath = 2; // Path to OCI bundle
-	repeated Process processes = 3; // List of processes which run in container
-	string status = 4; // Container status ("running", "paused", etc.)
-	repeated string labels = 5;
-	repeated uint32 pids = 6;
-	string runtime = 7; // runtime used to execute the container
-}
-
-// Machine is information about machine on which containerd is run
-message Machine {
-	uint32 cpus = 1; // number of cpus
-	uint64 memory = 2; // amount of memory
-}
-
-// StateResponse is information about containerd daemon
-message StateResponse {
-	repeated Container containers = 1;
-	Machine machine = 2;
-}
-
-message UpdateContainerRequest {
-	string id = 1; // ID of container
-	string pid = 2;
-	string status = 3; // Status to which containerd will try to change
-	UpdateResource resources =4;
-}
-
-message UpdateResource {
-	uint64 blkioWeight = 1;
-	uint64 cpuShares = 2;
-	uint64 cpuPeriod = 3;
-	uint64 cpuQuota = 4;
-	string cpusetCpus = 5;
-	string cpusetMems = 6;
-	uint64 memoryLimit = 7;
-	uint64 memorySwap = 8;
-	uint64 memoryReservation = 9;
-	uint64 kernelMemoryLimit = 10;
-	uint64 kernelTCPMemoryLimit = 11;
-	uint64 blkioLeafWeight = 12;
-	repeated WeightDevice blkioWeightDevice = 13;
-	repeated ThrottleDevice blkioThrottleReadBpsDevice = 14;
-	repeated ThrottleDevice blkioThrottleWriteBpsDevice = 15;
-	repeated ThrottleDevice blkioThrottleReadIopsDevice = 16;
-	repeated ThrottleDevice blkioThrottleWriteIopsDevice = 17;
-	uint64 pidsLimit = 18;
-	uint64 cpuRealtimePeriod = 19;
-	int64 cpuRealtimeRuntime = 20;
-}
-
-message BlockIODevice {
-	int64 major = 1;
-	int64 minor = 2;
-}
-
-message WeightDevice {
-	BlockIODevice blkIODevice = 1;
-	uint32 weight = 2;
-	uint32 leafWeight = 3;
-}
-
-message ThrottleDevice {
-	BlockIODevice blkIODevice = 1;
-	uint64 rate = 2;
-}
-
-message UpdateContainerResponse {
-}
-
-message EventsRequest {
-	// Tag 1 is deprecated (old uint64 timestamp)
-	google.protobuf.Timestamp timestamp = 2;
-	bool storedOnly = 3;
-	string id = 4;
-}
-
-message Event {
-	string type = 1;
-	string id = 2;
-	uint32 status = 3;
-	string pid = 4;
-	// Tag 5 is deprecated (old uint64 timestamp)
-	google.protobuf.Timestamp timestamp = 6;
-}
-
-message NetworkStats {
-	string name = 1; // name of network interface
-	uint64 rx_bytes  = 2;
-	uint64 rx_Packets = 3;
-	uint64 Rx_errors  = 4;
-	uint64 Rx_dropped = 5;
-	uint64 Tx_bytes   = 6;
-	uint64 Tx_packets = 7;
-	uint64 Tx_errors  = 8;
-	uint64 Tx_dropped = 9;
-}
-
-message CpuUsage {
-	uint64 total_usage = 1;
-	repeated uint64 percpu_usage = 2;
-	uint64 usage_in_kernelmode = 3;
-	uint64 usage_in_usermode = 4;
-}
-
-message ThrottlingData {
-	uint64 periods = 1;
-	uint64 throttled_periods = 2;
-	uint64 throttled_time = 3;
-}
-
-message CpuStats {
-	CpuUsage cpu_usage = 1;
-	ThrottlingData throttling_data = 2;
-	uint64 system_usage = 3;
-}
-
-message PidsStats {
-	uint64 current = 1;
-	uint64 limit = 2;
-}
-
-message MemoryData {
-	uint64 usage = 1;
-	uint64 max_usage = 2;
-	uint64 failcnt = 3;
-	uint64 limit = 4;
-}
-
-message MemoryStats {
-	uint64 cache = 1;
-	MemoryData usage = 2;
-	MemoryData swap_usage = 3;
-	MemoryData kernel_usage = 4;
-	map<string, uint64> stats = 5;
-}
-
-message BlkioStatsEntry {
-	uint64 major = 1;
-	uint64 minor = 2;
-	string op = 3;
-	uint64 value = 4;
-}
-
-message BlkioStats {
-	repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device
-	repeated BlkioStatsEntry io_serviced_recursive = 2;
-	repeated BlkioStatsEntry io_queued_recursive = 3;
-	repeated BlkioStatsEntry io_service_time_recursive = 4;
-	repeated BlkioStatsEntry io_wait_time_recursive = 5;
-	repeated BlkioStatsEntry io_merged_recursive = 6;
-	repeated BlkioStatsEntry io_time_recursive = 7;
-	repeated BlkioStatsEntry sectors_recursive = 8;
-}
-
-message HugetlbStats {
-	uint64 usage = 1;
-	uint64 max_usage = 2;
-	uint64 failcnt = 3;
-	uint64 limit = 4;
-}
-
-message CgroupStats {
-	CpuStats cpu_stats = 1;
-	MemoryStats memory_stats  = 2;
-	BlkioStats blkio_stats = 3;
-	map<string, HugetlbStats> hugetlb_stats = 4; // the map is in the format "size of hugepage: stats of the hugepage"
-	PidsStats pids_stats = 5;
-}
-
-message StatsResponse {
-	repeated NetworkStats network_stats = 1;
-	CgroupStats cgroup_stats = 2;
-	// Tag 3 is deprecated (old uint64 timestamp)
-	google.protobuf.Timestamp timestamp = 4;
-};
-
-message StatsRequest {
-	string id = 1;
-}
diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
new file mode 100644
index 0000000..90d3099
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
@@ -0,0 +1,2741 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/containers/v1/containers.proto
+// DO NOT EDIT!
+
+/*
+	Package containers is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/services/containers/v1/containers.proto
+
+	It has these top-level messages:
+		Container
+		GetContainerRequest
+		GetContainerResponse
+		ListContainersRequest
+		ListContainersResponse
+		CreateContainerRequest
+		CreateContainerResponse
+		UpdateContainerRequest
+		UpdateContainerResponse
+		DeleteContainerRequest
+*/
+package containers
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/gogo/protobuf/types"
+import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf3 "github.com/gogo/protobuf/types"
+import _ "github.com/gogo/protobuf/types"
+
+import time "time"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type Container struct {
+	// ID is the user-specified identifier.
+	//
+	// This field may not be updated.
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Labels provides an area to include arbitrary data on containers.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	//
+	// Note that to add a new value to this field, read the existing set and
+	// include the entire result in the update call.
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Image contains the reference of the image used to build the
+	// specification and snapshots for running this container.
+	//
+	// If this field is updated, the spec and rootfs needed to updated, as well.
+	Image string `protobuf:"bytes,3,opt,name=image,proto3" json:"image,omitempty"`
+	// Runtime specifies which runtime to use for executing this container.
+	Runtime *Container_Runtime `protobuf:"bytes,4,opt,name=runtime" json:"runtime,omitempty"`
+	// Spec to be used when creating the container. This is runtime specific.
+	Spec *google_protobuf1.Any `protobuf:"bytes,5,opt,name=spec" json:"spec,omitempty"`
+	// Snapshotter specifies the snapshotter name used for rootfs
+	Snapshotter string `protobuf:"bytes,6,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	// SnapshotKey specifies the snapshot key to use for the container's root
+	// filesystem. When starting a task from this container, a caller should
+	// look up the mounts from the snapshot service and include those on the
+	// task create request.
+	//
+	// Snapshots referenced in this field will not be garbage collected.
+	//
+	// This field is set to empty when the rootfs is not a snapshot.
+	//
+	// This field may be updated.
+	SnapshotKey string `protobuf:"bytes,7,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"`
+	// CreatedAt is the time the container was first created.
+	CreatedAt time.Time `protobuf:"bytes,8,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
+	// UpdatedAt is the last time the container was mutated.
+	UpdatedAt time.Time `protobuf:"bytes,9,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	// Extensions allow clients to provide zero or more blobs that are directly
+	// associated with the container. One may provide protobuf, json, or other
+	// encoding formats. The primary use of this is to further decorate the
+	// container object with fields that may be specific to a client integration.
+	//
+	// The key portion of this map should identify a "name" for the extension
+	// that should be unique against other extensions. When updating extension
+	// data, one should only update the specified extension using field paths
+	// to select a specific map key.
+	Extensions map[string]google_protobuf1.Any `protobuf:"bytes,10,rep,name=extensions" json:"extensions" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"`
+}
+
+func (m *Container) Reset()                    { *m = Container{} }
+func (*Container) ProtoMessage()               {}
+func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{0} }
+
+type Container_Runtime struct {
+	// Name is the name of the runtime.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Options specify additional runtime initialization options.
+	Options *google_protobuf1.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *Container_Runtime) Reset()                    { *m = Container_Runtime{} }
+func (*Container_Runtime) ProtoMessage()               {}
+func (*Container_Runtime) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{0, 1} }
+
+type GetContainerRequest struct {
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (m *GetContainerRequest) Reset()                    { *m = GetContainerRequest{} }
+func (*GetContainerRequest) ProtoMessage()               {}
+func (*GetContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{1} }
+
+type GetContainerResponse struct {
+	Container Container `protobuf:"bytes,1,opt,name=container" json:"container"`
+}
+
+func (m *GetContainerResponse) Reset()                    { *m = GetContainerResponse{} }
+func (*GetContainerResponse) ProtoMessage()               {}
+func (*GetContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{2} }
+
+type ListContainersRequest struct {
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, containers that match the following will be
+	// returned:
+	//
+	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
+	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+}
+
+func (m *ListContainersRequest) Reset()                    { *m = ListContainersRequest{} }
+func (*ListContainersRequest) ProtoMessage()               {}
+func (*ListContainersRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{3} }
+
+type ListContainersResponse struct {
+	Containers []Container `protobuf:"bytes,1,rep,name=containers" json:"containers"`
+}
+
+func (m *ListContainersResponse) Reset()                    { *m = ListContainersResponse{} }
+func (*ListContainersResponse) ProtoMessage()               {}
+func (*ListContainersResponse) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{4} }
+
+type CreateContainerRequest struct {
+	Container Container `protobuf:"bytes,1,opt,name=container" json:"container"`
+}
+
+func (m *CreateContainerRequest) Reset()                    { *m = CreateContainerRequest{} }
+func (*CreateContainerRequest) ProtoMessage()               {}
+func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{5} }
+
+type CreateContainerResponse struct {
+	Container Container `protobuf:"bytes,1,opt,name=container" json:"container"`
+}
+
+func (m *CreateContainerResponse) Reset()      { *m = CreateContainerResponse{} }
+func (*CreateContainerResponse) ProtoMessage() {}
+func (*CreateContainerResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptorContainers, []int{6}
+}
+
+// UpdateContainerRequest updates the metadata on one or more container.
+//
+// The operation should follow semantics described in
+// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
+// unless otherwise qualified.
+type UpdateContainerRequest struct {
+	// Container provides the target values, as declared by the mask, for the update.
+	//
+	// The ID field must be set.
+	Container Container `protobuf:"bytes,1,opt,name=container" json:"container"`
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	UpdateMask *google_protobuf3.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+}
+
+func (m *UpdateContainerRequest) Reset()                    { *m = UpdateContainerRequest{} }
+func (*UpdateContainerRequest) ProtoMessage()               {}
+func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{7} }
+
+type UpdateContainerResponse struct {
+	Container Container `protobuf:"bytes,1,opt,name=container" json:"container"`
+}
+
+func (m *UpdateContainerResponse) Reset()      { *m = UpdateContainerResponse{} }
+func (*UpdateContainerResponse) ProtoMessage() {}
+func (*UpdateContainerResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptorContainers, []int{8}
+}
+
+type DeleteContainerRequest struct {
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (m *DeleteContainerRequest) Reset()                    { *m = DeleteContainerRequest{} }
+func (*DeleteContainerRequest) ProtoMessage()               {}
+func (*DeleteContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptorContainers, []int{9} }
+
+func init() {
+	proto.RegisterType((*Container)(nil), "containerd.services.containers.v1.Container")
+	proto.RegisterType((*Container_Runtime)(nil), "containerd.services.containers.v1.Container.Runtime")
+	proto.RegisterType((*GetContainerRequest)(nil), "containerd.services.containers.v1.GetContainerRequest")
+	proto.RegisterType((*GetContainerResponse)(nil), "containerd.services.containers.v1.GetContainerResponse")
+	proto.RegisterType((*ListContainersRequest)(nil), "containerd.services.containers.v1.ListContainersRequest")
+	proto.RegisterType((*ListContainersResponse)(nil), "containerd.services.containers.v1.ListContainersResponse")
+	proto.RegisterType((*CreateContainerRequest)(nil), "containerd.services.containers.v1.CreateContainerRequest")
+	proto.RegisterType((*CreateContainerResponse)(nil), "containerd.services.containers.v1.CreateContainerResponse")
+	proto.RegisterType((*UpdateContainerRequest)(nil), "containerd.services.containers.v1.UpdateContainerRequest")
+	proto.RegisterType((*UpdateContainerResponse)(nil), "containerd.services.containers.v1.UpdateContainerResponse")
+	proto.RegisterType((*DeleteContainerRequest)(nil), "containerd.services.containers.v1.DeleteContainerRequest")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Containers service
+
+type ContainersClient interface {
+	Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error)
+	List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error)
+	Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error)
+	Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error)
+	Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
+}
+
+type containersClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewContainersClient(cc *grpc.ClientConn) ContainersClient {
+	return &containersClient{cc}
+}
+
+func (c *containersClient) Get(ctx context.Context, in *GetContainerRequest, opts ...grpc.CallOption) (*GetContainerResponse, error) {
+	out := new(GetContainerResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Get", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *containersClient) List(ctx context.Context, in *ListContainersRequest, opts ...grpc.CallOption) (*ListContainersResponse, error) {
+	out := new(ListContainersResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/List", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *containersClient) Create(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) {
+	out := new(CreateContainerResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Create", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *containersClient) Update(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) {
+	out := new(UpdateContainerResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Update", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *containersClient) Delete(ctx context.Context, in *DeleteContainerRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
+	out := new(google_protobuf2.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.containers.v1.Containers/Delete", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for Containers service
+
+type ContainersServer interface {
+	Get(context.Context, *GetContainerRequest) (*GetContainerResponse, error)
+	List(context.Context, *ListContainersRequest) (*ListContainersResponse, error)
+	Create(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error)
+	Update(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error)
+	Delete(context.Context, *DeleteContainerRequest) (*google_protobuf2.Empty, error)
+}
+
+func RegisterContainersServer(s *grpc.Server, srv ContainersServer) {
+	s.RegisterService(&_Containers_serviceDesc, srv)
+}
+
+func _Containers_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetContainerRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContainersServer).Get(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.containers.v1.Containers/Get",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContainersServer).Get(ctx, req.(*GetContainerRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Containers_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListContainersRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContainersServer).List(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.containers.v1.Containers/List",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContainersServer).List(ctx, req.(*ListContainersRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Containers_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateContainerRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContainersServer).Create(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.containers.v1.Containers/Create",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContainersServer).Create(ctx, req.(*CreateContainerRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Containers_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateContainerRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContainersServer).Update(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.containers.v1.Containers/Update",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContainersServer).Update(ctx, req.(*UpdateContainerRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Containers_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteContainerRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContainersServer).Delete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.containers.v1.Containers/Delete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContainersServer).Delete(ctx, req.(*DeleteContainerRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Containers_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "containerd.services.containers.v1.Containers",
+	HandlerType: (*ContainersServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Get",
+			Handler:    _Containers_Get_Handler,
+		},
+		{
+			MethodName: "List",
+			Handler:    _Containers_List_Handler,
+		},
+		{
+			MethodName: "Create",
+			Handler:    _Containers_Create_Handler,
+		},
+		{
+			MethodName: "Update",
+			Handler:    _Containers_Update_Handler,
+		},
+		{
+			MethodName: "Delete",
+			Handler:    _Containers_Delete_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/containerd/containerd/api/services/containers/v1/containers.proto",
+}
+
+func (m *Container) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Container) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContainers(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x12
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovContainers(uint64(len(k))) + 1 + len(v) + sovContainers(uint64(len(v)))
+			i = encodeVarintContainers(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintContainers(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintContainers(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	if len(m.Image) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintContainers(dAtA, i, uint64(len(m.Image)))
+		i += copy(dAtA[i:], m.Image)
+	}
+	if m.Runtime != nil {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintContainers(dAtA, i, uint64(m.Runtime.Size()))
+		n1, err := m.Runtime.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n1
+	}
+	if m.Spec != nil {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintContainers(dAtA, i, uint64(m.Spec.Size()))
+		n2, err := m.Spec.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n2
+	}
+	if len(m.Snapshotter) > 0 {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintContainers(dAtA, i, uint64(len(m.Snapshotter)))
+		i += copy(dAtA[i:], m.Snapshotter)
+	}
+	if len(m.SnapshotKey) > 0 {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintContainers(dAtA, i, uint64(len(m.SnapshotKey)))
+		i += copy(dAtA[i:], m.SnapshotKey)
+	}
+	dAtA[i] = 0x42
+	i++
+	i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
+	n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n3
+	dAtA[i] = 0x4a
+	i++
+	i = encodeVarintContainers(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+	n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n4
+	if len(m.Extensions) > 0 {
+		for k, _ := range m.Extensions {
+			dAtA[i] = 0x52
+			i++
+			v := m.Extensions[k]
+			msgSize := 0
+			if (&v) != nil {
+				msgSize = (&v).Size()
+				msgSize += 1 + sovContainers(uint64(msgSize))
+			}
+			mapSize := 1 + len(k) + sovContainers(uint64(len(k))) + msgSize
+			i = encodeVarintContainers(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintContainers(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintContainers(dAtA, i, uint64((&v).Size()))
+			n5, err := (&v).MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n5
+		}
+	}
+	return i, nil
+}
+
+func (m *Container_Runtime) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Container_Runtime) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContainers(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	if m.Options != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintContainers(dAtA, i, uint64(m.Options.Size()))
+		n6, err := m.Options.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n6
+	}
+	return i, nil
+}
+
+func (m *GetContainerRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GetContainerRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContainers(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	return i, nil
+}
+
+func (m *GetContainerResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GetContainerResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
+	n7, err := m.Container.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n7
+	return i, nil
+}
+
+func (m *ListContainersRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListContainersRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			dAtA[i] = 0xa
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	return i, nil
+}
+
+func (m *ListContainersResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListContainersResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Containers) > 0 {
+		for _, msg := range m.Containers {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintContainers(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *CreateContainerRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateContainerRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
+	n8, err := m.Container.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n8
+	return i, nil
+}
+
+func (m *CreateContainerResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateContainerResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
+	n9, err := m.Container.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n9
+	return i, nil
+}
+
+func (m *UpdateContainerRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateContainerRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
+	n10, err := m.Container.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n10
+	if m.UpdateMask != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintContainers(dAtA, i, uint64(m.UpdateMask.Size()))
+		n11, err := m.UpdateMask.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n11
+	}
+	return i, nil
+}
+
+func (m *UpdateContainerResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateContainerResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintContainers(dAtA, i, uint64(m.Container.Size()))
+	n12, err := m.Container.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n12
+	return i, nil
+}
+
+func (m *DeleteContainerRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteContainerRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContainers(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	return i, nil
+}
+
+func encodeFixed64Containers(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Containers(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintContainers(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Container) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovContainers(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovContainers(uint64(len(k))) + 1 + len(v) + sovContainers(uint64(len(v)))
+			n += mapEntrySize + 1 + sovContainers(uint64(mapEntrySize))
+		}
+	}
+	l = len(m.Image)
+	if l > 0 {
+		n += 1 + l + sovContainers(uint64(l))
+	}
+	if m.Runtime != nil {
+		l = m.Runtime.Size()
+		n += 1 + l + sovContainers(uint64(l))
+	}
+	if m.Spec != nil {
+		l = m.Spec.Size()
+		n += 1 + l + sovContainers(uint64(l))
+	}
+	l = len(m.Snapshotter)
+	if l > 0 {
+		n += 1 + l + sovContainers(uint64(l))
+	}
+	l = len(m.SnapshotKey)
+	if l > 0 {
+		n += 1 + l + sovContainers(uint64(l))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
+	n += 1 + l + sovContainers(uint64(l))
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
+	n += 1 + l + sovContainers(uint64(l))
+	if len(m.Extensions) > 0 {
+		for k, v := range m.Extensions {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovContainers(uint64(len(k))) + 1 + l + sovContainers(uint64(l))
+			n += mapEntrySize + 1 + sovContainers(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *Container_Runtime) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovContainers(uint64(l))
+	}
+	if m.Options != nil {
+		l = m.Options.Size()
+		n += 1 + l + sovContainers(uint64(l))
+	}
+	return n
+}
+
+func (m *GetContainerRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovContainers(uint64(l))
+	}
+	return n
+}
+
+func (m *GetContainerResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Container.Size()
+	n += 1 + l + sovContainers(uint64(l))
+	return n
+}
+
+func (m *ListContainersRequest) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			l = len(s)
+			n += 1 + l + sovContainers(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ListContainersResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Containers) > 0 {
+		for _, e := range m.Containers {
+			l = e.Size()
+			n += 1 + l + sovContainers(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *CreateContainerRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Container.Size()
+	n += 1 + l + sovContainers(uint64(l))
+	return n
+}
+
+func (m *CreateContainerResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Container.Size()
+	n += 1 + l + sovContainers(uint64(l))
+	return n
+}
+
+func (m *UpdateContainerRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Container.Size()
+	n += 1 + l + sovContainers(uint64(l))
+	if m.UpdateMask != nil {
+		l = m.UpdateMask.Size()
+		n += 1 + l + sovContainers(uint64(l))
+	}
+	return n
+}
+
+func (m *UpdateContainerResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Container.Size()
+	n += 1 + l + sovContainers(uint64(l))
+	return n
+}
+
+func (m *DeleteContainerRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovContainers(uint64(l))
+	}
+	return n
+}
+
+func sovContainers(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozContainers(x uint64) (n int) {
+	return sovContainers(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
// String returns a single-line debug representation of the container.
// Labels and Extensions are rendered in sorted-key order so the output
// is deterministic across runs.
func (this *Container) String() string {
	if this == nil {
		return "nil"
	}
	// Collect and sort label keys for deterministic map rendering.
	keysForLabels := make([]string, 0, len(this.Labels))
	for k, _ := range this.Labels {
		keysForLabels = append(keysForLabels, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
	mapStringForLabels := "map[string]string{"
	for _, k := range keysForLabels {
		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
	}
	mapStringForLabels += "}"
	// Same treatment for the extensions map.
	keysForExtensions := make([]string, 0, len(this.Extensions))
	for k, _ := range this.Extensions {
		keysForExtensions = append(keysForExtensions, k)
	}
	github_com_gogo_protobuf_sortkeys.Strings(keysForExtensions)
	mapStringForExtensions := "map[string]google_protobuf1.Any{"
	for _, k := range keysForExtensions {
		mapStringForExtensions += fmt.Sprintf("%v: %v,", k, this.Extensions[k])
	}
	mapStringForExtensions += "}"
	// The strings.Replace calls rewrite embedded type names to their
	// import-qualified forms and strip the leading "&" from embedded
	// message representations.
	s := strings.Join([]string{`&Container{`,
		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
		`Labels:` + mapStringForLabels + `,`,
		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
		`Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "Container_Runtime", "Container_Runtime", 1) + `,`,
		`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf1.Any", 1) + `,`,
		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
		`SnapshotKey:` + fmt.Sprintf("%v", this.SnapshotKey) + `,`,
		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`,
		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`,
		`Extensions:` + mapStringForExtensions + `,`,
		`}`,
	}, "")
	return s
}
+func (this *Container_Runtime) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Container_Runtime{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GetContainerRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GetContainerRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GetContainerResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GetContainerResponse{`,
+		`Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListContainersRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListContainersRequest{`,
+		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListContainersResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListContainersResponse{`,
+		`Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CreateContainerRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateContainerRequest{`,
+		`Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CreateContainerResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateContainerResponse{`,
+		`Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateContainerRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateContainerRequest{`,
+		`Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`,
+		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "google_protobuf3.FieldMask", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateContainerResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateContainerResponse{`,
+		`Container:` + strings.Replace(strings.Replace(this.Container.String(), "Container", "Container", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeleteContainerRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeleteContainerRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
// valueToStringContainers renders a pointer value for debug output:
// "nil" for nil pointers, otherwise "*<value>" with the pointee's
// default formatting.
func valueToStringContainers(v interface{}) string {
	ptr := reflect.ValueOf(v)
	if ptr.IsNil() {
		return "nil"
	}
	return fmt.Sprintf("*%v", reflect.Indirect(ptr).Interface())
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// This is a hand-unrolled decoder produced by protoc-gen-gogo: each
// field is parsed with inline base-128 varint loops, and unknown
// fields are skipped via skipContainers.
func (m *Container) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag: a varint of (field number << 3 | wire type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowContainers
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Container: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: ID (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ID = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: Labels (map<string,string>). The map entry is a
			// nested message with key = field 1, value = field 2; both
			// are decoded inline below.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			// Key sub-field: tag varint, then length-prefixed string.
			var keykey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				keykey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			var stringLenmapkey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLenmapkey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLenmapkey := int(stringLenmapkey)
			if intStringLenmapkey < 0 {
				return ErrInvalidLengthContainers
			}
			postStringIndexmapkey := iNdEx + intStringLenmapkey
			if postStringIndexmapkey > l {
				return io.ErrUnexpectedEOF
			}
			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
			iNdEx = postStringIndexmapkey
			// Lazily allocate the map on first entry.
			if m.Labels == nil {
				m.Labels = make(map[string]string)
			}
			if iNdEx < postIndex {
				// Value sub-field present: decode tag + string value.
				var valuekey uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowContainers
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					valuekey |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				var stringLenmapvalue uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowContainers
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				intStringLenmapvalue := int(stringLenmapvalue)
				if intStringLenmapvalue < 0 {
					return ErrInvalidLengthContainers
				}
				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
				if postStringIndexmapvalue > l {
					return io.ErrUnexpectedEOF
				}
				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
				iNdEx = postStringIndexmapvalue
				m.Labels[mapkey] = mapvalue
			} else {
				// Value sub-field omitted: store the zero value.
				var mapvalue string
				m.Labels[mapkey] = mapvalue
			}
			iNdEx = postIndex
		case 3:
			// Field 3: Image (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Image = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 4:
			// Field 4: Runtime (embedded Container_Runtime message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Runtime == nil {
				m.Runtime = &Container_Runtime{}
			}
			if err := m.Runtime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 5:
			// Field 5: Spec (embedded google.protobuf.Any message).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Spec == nil {
				m.Spec = &google_protobuf1.Any{}
			}
			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 6:
			// Field 6: Snapshotter (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Snapshotter = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 7:
			// Field 7: SnapshotKey (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field SnapshotKey", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.SnapshotKey = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 8:
			// Field 8: CreatedAt (google.protobuf.Timestamp decoded into
			// a stdlib time.Time via the gogo stdtime helper).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 9:
			// Field 9: UpdatedAt (google.protobuf.Timestamp, as above).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 10:
			// Field 10: Extensions (map<string, google.protobuf.Any>),
			// decoded like the Labels map but with a message-typed value.
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Extensions", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			var keykey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				keykey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			var stringLenmapkey uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLenmapkey |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLenmapkey := int(stringLenmapkey)
			if intStringLenmapkey < 0 {
				return ErrInvalidLengthContainers
			}
			postStringIndexmapkey := iNdEx + intStringLenmapkey
			if postStringIndexmapkey > l {
				return io.ErrUnexpectedEOF
			}
			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
			iNdEx = postStringIndexmapkey
			if m.Extensions == nil {
				m.Extensions = make(map[string]google_protobuf1.Any)
			}
			if iNdEx < postIndex {
				var valuekey uint64
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowContainers
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					valuekey |= (uint64(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				var mapmsglen int
				for shift := uint(0); ; shift += 7 {
					if shift >= 64 {
						return ErrIntOverflowContainers
					}
					if iNdEx >= l {
						return io.ErrUnexpectedEOF
					}
					b := dAtA[iNdEx]
					iNdEx++
					mapmsglen |= (int(b) & 0x7F) << shift
					if b < 0x80 {
						break
					}
				}
				if mapmsglen < 0 {
					return ErrInvalidLengthContainers
				}
				postmsgIndex := iNdEx + mapmsglen
				// NOTE(review): this repeats the mapmsglen < 0 check above;
				// presumably the generator intended to guard postmsgIndex
				// overflow here — confirm against upstream protoc-gen-gogo.
				if mapmsglen < 0 {
					return ErrInvalidLengthContainers
				}
				if postmsgIndex > l {
					return io.ErrUnexpectedEOF
				}
				mapvalue := &google_protobuf1.Any{}
				if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
					return err
				}
				iNdEx = postmsgIndex
				m.Extensions[mapkey] = *mapvalue
			} else {
				// Value sub-field omitted: store the zero Any.
				var mapvalue google_protobuf1.Any
				m.Extensions[mapkey] = mapvalue
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipContainers(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthContainers
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Fields: 1 = Name (string), 2 = Options (google.protobuf.Any).
// Generated hand-unrolled decoder; unknown fields are skipped.
func (m *Container_Runtime) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag: varint of (field number << 3 | wire type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowContainers
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: Runtime: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: Runtime: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Name (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Name = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		case 2:
			// Field 2: Options (embedded Any, allocated lazily).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.Options == nil {
				m.Options = &google_protobuf1.Any{}
			}
			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipContainers(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthContainers
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Single field: 1 = ID (string). Unknown fields are skipped.
func (m *GetContainerRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag: varint of (field number << 3 | wire type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowContainers
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetContainerRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: ID (string, length-delimited).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.ID = string(dAtA[iNdEx:postIndex])
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipContainers(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthContainers
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Single field: 1 = Container (embedded, non-nullable message).
func (m *GetContainerResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag: varint of (field number << 3 | wire type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowContainers
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: GetContainerResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: GetContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Container (length-delimited embedded message,
			// decoded directly into the value field).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipContainers(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthContainers
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Single field: 1 = Filters (repeated string, appended per occurrence).
func (m *ListContainersRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag: varint of (field number << 3 | wire type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowContainers
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ListContainersRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ListContainersRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Filters (repeated string; one wire entry appends
			// one element).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipContainers(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthContainers
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Single field: 1 = Containers (repeated embedded Container message).
func (m *ListContainersResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag: varint of (field number << 3 | wire type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowContainers
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: ListContainersResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: ListContainersResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Containers (repeated message; a zero element is
			// appended first, then decoded in place).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.Containers = append(m.Containers, Container{})
			if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipContainers(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthContainers
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Single field: 1 = Container (embedded, non-nullable message).
func (m *CreateContainerRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag: varint of (field number << 3 | wire type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowContainers
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CreateContainerRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CreateContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Container (length-delimited embedded message,
			// decoded directly into the value field).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipContainers(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthContainers
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Single field: 1 = Container (embedded, non-nullable message).
func (m *CreateContainerResponse) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag: varint of (field number << 3 | wire type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowContainers
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: CreateContainerResponse: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: CreateContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Container (length-delimited embedded message,
			// decoded directly into the value field).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipContainers(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthContainers
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
// Unmarshal decodes the protobuf wire-format bytes in dAtA into m.
// Fields: 1 = Container (embedded, non-nullable), 2 = UpdateMask
// (optional google.protobuf.FieldMask, allocated lazily).
func (m *UpdateContainerRequest) Unmarshal(dAtA []byte) error {
	l := len(dAtA)
	iNdEx := 0
	for iNdEx < l {
		preIndex := iNdEx
		// Read the field tag: varint of (field number << 3 | wire type).
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return ErrIntOverflowContainers
			}
			if iNdEx >= l {
				return io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		if wireType == 4 {
			return fmt.Errorf("proto: UpdateContainerRequest: wiretype end group for non-group")
		}
		if fieldNum <= 0 {
			return fmt.Errorf("proto: UpdateContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire)
		}
		switch fieldNum {
		case 1:
			// Field 1: Container (length-delimited embedded message,
			// decoded directly into the value field).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		case 2:
			// Field 2: UpdateMask (embedded FieldMask, allocated lazily).
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType)
			}
			var msglen int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowContainers
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				msglen |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if msglen < 0 {
				return ErrInvalidLengthContainers
			}
			postIndex := iNdEx + msglen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			if m.UpdateMask == nil {
				m.UpdateMask = &google_protobuf3.FieldMask{}
			}
			if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
				return err
			}
			iNdEx = postIndex
		default:
			// Unknown field: rewind to the tag and skip the whole field.
			iNdEx = preIndex
			skippy, err := skipContainers(dAtA[iNdEx:])
			if err != nil {
				return err
			}
			if skippy < 0 {
				return ErrInvalidLengthContainers
			}
			if (iNdEx + skippy) > l {
				return io.ErrUnexpectedEOF
			}
			iNdEx += skippy
		}
	}

	if iNdEx > l {
		return io.ErrUnexpectedEOF
	}
	return nil
}
+func (m *UpdateContainerResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContainers
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UpdateContainerResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UpdateContainerResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainers
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContainers
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Container.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContainers(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContainers
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteContainerRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContainers
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteContainerRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteContainerRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainers
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContainers
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContainers(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContainers
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipContainers(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowContainers
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowContainers
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowContainers
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthContainers
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowContainers
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipContainers(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthContainers = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowContainers   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/containers/v1/containers.proto", fileDescriptorContainers)
+}
+
+var fileDescriptorContainers = []byte{
+	// 776 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x72, 0x12, 0x5b,
+	0x10, 0xce, 0x00, 0x81, 0xd0, 0xdc, 0xaa, 0x7b, 0xeb, 0x5c, 0x2e, 0x77, 0x1c, 0xab, 0x80, 0xb0,
+	0xa2, 0x2c, 0x1d, 0x0c, 0x5a, 0x9a, 0x1f, 0x37, 0x21, 0x7f, 0x65, 0x99, 0x58, 0xa9, 0x51, 0x37,
+	0xba, 0x88, 0x03, 0x74, 0xc8, 0xc8, 0xfc, 0x39, 0xe7, 0x40, 0x49, 0xb9, 0xd0, 0x47, 0x70, 0xe7,
+	0x23, 0xf8, 0x2a, 0x59, 0xba, 0x74, 0x15, 0x13, 0x9e, 0xc4, 0x9a, 0x33, 0x33, 0xcc, 0x04, 0x06,
+	0x85, 0x68, 0x76, 0xa7, 0x39, 0xfd, 0x7d, 0xfd, 0xf1, 0x75, 0xf7, 0x01, 0xd8, 0xef, 0x68, 0xec,
+	0xa4, 0xd7, 0x94, 0x5b, 0x96, 0x51, 0x6b, 0x59, 0x26, 0x53, 0x35, 0x13, 0x9d, 0x76, 0xf4, 0xa8,
+	0xda, 0x5a, 0x8d, 0xa2, 0xd3, 0xd7, 0x5a, 0x48, 0xc3, 0xcf, 0x69, 0xad, 0xbf, 0x12, 0x89, 0x64,
+	0xdb, 0xb1, 0x98, 0x45, 0x96, 0x43, 0x9c, 0x1c, 0x60, 0xe4, 0x48, 0x56, 0x7f, 0x45, 0xca, 0x77,
+	0xac, 0x8e, 0xc5, 0xb3, 0x6b, 0xee, 0xc9, 0x03, 0x4a, 0x37, 0x3a, 0x96, 0xd5, 0xd1, 0xb1, 0xc6,
+	0xa3, 0x66, 0xef, 0xb8, 0xa6, 0x9a, 0x03, 0xff, 0xea, 0xe6, 0xf8, 0x15, 0x1a, 0x36, 0x0b, 0x2e,
+	0xcb, 0xe3, 0x97, 0xc7, 0x1a, 0xea, 0xed, 0x23, 0x43, 0xa5, 0x5d, 0x3f, 0xa3, 0x34, 0x9e, 0xc1,
+	0x34, 0x03, 0x29, 0x53, 0x0d, 0xdb, 0x4b, 0xa8, 0x7c, 0x4e, 0x43, 0x76, 0x2b, 0x90, 0x48, 0x0a,
+	0x90, 0xd0, 0xda, 0xa2, 0x50, 0x16, 0xaa, 0xd9, 0x46, 0x7a, 0x78, 0x56, 0x4a, 0x3c, 0xde, 0x56,
+	0x12, 0x5a, 0x9b, 0x1c, 0x42, 0x5a, 0x57, 0x9b, 0xa8, 0x53, 0x31, 0x51, 0x4e, 0x56, 0x73, 0xf5,
+	0x55, 0xf9, 0x97, 0x5f, 0x55, 0x1e, 0xb1, 0xca, 0xfb, 0x1c, 0xba, 0x63, 0x32, 0x67, 0xa0, 0xf8,
+	0x3c, 0x24, 0x0f, 0x8b, 0x9a, 0xa1, 0x76, 0x50, 0x4c, 0xba, 0xc5, 0x14, 0x2f, 0x20, 0x4f, 0x21,
+	0xe3, 0xf4, 0x4c, 0x57, 0xa3, 0x98, 0x2a, 0x0b, 0xd5, 0x5c, 0xfd, 0xfe, 0x5c, 0x85, 0x14, 0x0f,
+	0xab, 0x04, 0x24, 0xa4, 0x0a, 0x29, 0x6a, 0x63, 0x4b, 0x5c, 0xe4, 0x64, 0x79, 0xd9, 0x73, 0x43,
+	0x0e, 0xdc, 0x90, 0x37, 0xcd, 0x81, 0xc2, 0x33, 0x48, 0x19, 0x72, 0xd4, 0x54, 0x6d, 0x7a, 0x62,
+	0x31, 0x86, 0x8e, 0x98, 0xe6, 0xaa, 0xa2, 0x1f, 0x91, 0x65, 0xf8, 0x2b, 0x08, 0x8f, 0xba, 0x38,
+	0x10, 0x33, 0x97, 0x53, 0x9e, 0xe0, 0x80, 0x6c, 0x01, 0xb4, 0x1c, 0x54, 0x19, 0xb6, 0x8f, 0x54,
+	0x26, 0x2e, 0xf1, 0xa2, 0xd2, 0x44, 0xd1, 0xe7, 0x41, 0x0b, 0x1a, 0x4b, 0xa7, 0x67, 0xa5, 0x85,
+	0x4f, 0xdf, 0x4b, 0x82, 0x92, 0xf5, 0x71, 0x9b, 0xcc, 0x25, 0xe9, 0xd9, 0xed, 0x80, 0x24, 0x3b,
+	0x0f, 0x89, 0x8f, 0xdb, 0x64, 0xa4, 0x09, 0x80, 0xef, 0x18, 0x9a, 0x54, 0xb3, 0x4c, 0x2a, 0x02,
+	0x6f, 0xda, 0xa3, 0xb9, 0xbc, 0xdc, 0x19, 0xc1, 0x79, 0xe3, 0x1a, 0x29, 0xb7, 0x8c, 0x12, 0x61,
+	0x95, 0xd6, 0x20, 0x17, 0xe9, 0x2c, 0xf9, 0x07, 0x92, 0xae, 0x2d, 0x7c, 0x78, 0x14, 0xf7, 0xe8,
+	0xf6, 0xb8, 0xaf, 0xea, 0x3d, 0x14, 0x13, 0x5e, 0x8f, 0x79, 0xb0, 0x9e, 0x58, 0x15, 0xa4, 0x03,
+	0xc8, 0xf8, 0xbd, 0x22, 0x04, 0x52, 0xa6, 0x6a, 0xa0, 0x8f, 0xe3, 0x67, 0x22, 0x43, 0xc6, 0xb2,
+	0x19, 0x97, 0x9e, 0xf8, 0x49, 0xe7, 0x82, 0x24, 0xe9, 0x19, 0xfc, 0x3d, 0x26, 0x37, 0x46, 0xcd,
+	0xad, 0xa8, 0x9a, 0x69, 0x94, 0xa1, 0xc6, 0xca, 0x1d, 0xf8, 0x77, 0x0f, 0xd9, 0xc8, 0x10, 0x05,
+	0xdf, 0xf6, 0x90, 0xb2, 0x69, 0x2b, 0x52, 0x39, 0x81, 0xfc, 0xe5, 0x74, 0x6a, 0x5b, 0x26, 0x45,
+	0x72, 0x08, 0xd9, 0x91, 0xc5, 0x1c, 0x96, 0xab, 0xdf, 0x9e, 0xa7, 0x11, 0xbe, 0xf1, 0x21, 0x49,
+	0x65, 0x05, 0xfe, 0xdb, 0xd7, 0x68, 0x58, 0x8a, 0x06, 0xd2, 0x44, 0xc8, 0x1c, 0x6b, 0x3a, 0x43,
+	0x87, 0x8a, 0x42, 0x39, 0x59, 0xcd, 0x2a, 0x41, 0x58, 0xd1, 0xa1, 0x30, 0x0e, 0xf1, 0xe5, 0x29,
+	0x00, 0x61, 0x61, 0x0e, 0xbb, 0x9a, 0xbe, 0x08, 0x4b, 0xe5, 0x0d, 0x14, 0xb6, 0xf8, 0x38, 0x4f,
+	0x98, 0xf7, 0xe7, 0xcd, 0xe8, 0xc2, 0xff, 0x13, 0xb5, 0xae, 0xcd, 0xf9, 0x2f, 0x02, 0x14, 0x5e,
+	0xf0, 0x1d, 0xbb, 0xfe, 0x6f, 0x46, 0x36, 0x20, 0xe7, 0xed, 0x33, 0x7f, 0xcf, 0xfd, 0xa9, 0x9d,
+	0x7c, 0x08, 0x76, 0xdd, 0x27, 0xff, 0x40, 0xa5, 0x5d, 0xc5, 0x7f, 0x36, 0xdc, 0xb3, 0x6b, 0xcb,
+	0x84, 0xd0, 0x6b, 0xb3, 0xe5, 0x2e, 0x14, 0xb6, 0x51, 0xc7, 0x18, 0x57, 0xa6, 0x2c, 0x4b, 0xfd,
+	0x3c, 0x05, 0x10, 0x0e, 0x23, 0xe9, 0x43, 0x72, 0x0f, 0x19, 0x79, 0x30, 0x83, 0x8c, 0x98, 0x95,
+	0x94, 0x1e, 0xce, 0x8d, 0xf3, 0xad, 0x78, 0x0f, 0x29, 0x77, 0x2d, 0xc8, 0x2c, 0x3f, 0x67, 0xb1,
+	0x2b, 0x27, 0xad, 0x5d, 0x01, 0xe9, 0x17, 0xff, 0x00, 0x69, 0x6f, 0x72, 0xc9, 0x2c, 0x24, 0xf1,
+	0x0b, 0x25, 0xad, 0x5f, 0x05, 0x1a, 0x0a, 0xf0, 0x66, 0x64, 0x26, 0x01, 0xf1, 0x73, 0x3f, 0x93,
+	0x80, 0x69, 0x93, 0xf8, 0x0a, 0xd2, 0xde, 0xdc, 0xcc, 0x24, 0x20, 0x7e, 0xc4, 0xa4, 0xc2, 0xc4,
+	0x46, 0xec, 0xb8, 0xff, 0x90, 0x1a, 0xaf, 0x4f, 0x2f, 0x8a, 0x0b, 0xdf, 0x2e, 0x8a, 0x0b, 0x1f,
+	0x87, 0x45, 0xe1, 0x74, 0x58, 0x14, 0xbe, 0x0e, 0x8b, 0xc2, 0xf9, 0xb0, 0x28, 0xbc, 0xdc, 0xfd,
+	0x8d, 0x3f, 0x7d, 0x1b, 0x61, 0xd4, 0x4c, 0xf3, 0x8a, 0xf7, 0x7e, 0x04, 0x00, 0x00, 0xff, 0xff,
+	0x17, 0x73, 0xba, 0x43, 0x45, 0x0a, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto
new file mode 100644
index 0000000..0a2311c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto
@@ -0,0 +1,158 @@
+syntax = "proto3";
+
+package containerd.services.containers.v1;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/containers/v1;containers";
+
+// Containers provides metadata storage for containers used in the execution
+// service.
+//
+// The objects here provide an state-independent view of containers for use in
+// management and resource pinning. From that perspective, containers do not
+// have a "state" but rather this is the set of resources that will be
+// considered in use by the container.
+//
+// From the perspective of the execution service, these objects represent the
+// base parameters for creating a container process.
+//
+// In general, when looking to add fields for this type, first ask yourself
+// whether or not the function of the field has to do with runtime execution or
+// is invariant of the runtime state of the container. If it has to do with
+// runtime, or changes as the "container" is started and stops, it probably
+// doesn't belong on this object.
+service Containers {
+	rpc Get(GetContainerRequest) returns (GetContainerResponse);
+	rpc List(ListContainersRequest) returns (ListContainersResponse);
+	rpc Create(CreateContainerRequest) returns (CreateContainerResponse);
+	rpc Update(UpdateContainerRequest) returns (UpdateContainerResponse);
+	rpc Delete(DeleteContainerRequest) returns (google.protobuf.Empty);
+}
+
+message Container {
+	// ID is the user-specified identifier.
+	//
+	// This field may not be updated.
+	string id = 1;
+
+	// Labels provides an area to include arbitrary data on containers.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	//
+	// Note that to add a new value to this field, read the existing set and
+	// include the entire result in the update call.
+	map<string, string> labels  = 2;
+
+	// Image contains the reference of the image used to build the
+	// specification and snapshots for running this container.
+	//
+	// If this field is updated, the spec and rootfs needed to updated, as well.
+	string image = 3;
+
+	message Runtime {
+		// Name is the name of the runtime.
+		string name = 1;
+		// Options specify additional runtime initialization options.
+		google.protobuf.Any options = 2;
+	}
+	// Runtime specifies which runtime to use for executing this container.
+	Runtime runtime = 4;
+
+	// Spec to be used when creating the container. This is runtime specific.
+	google.protobuf.Any spec = 5;
+
+	// Snapshotter specifies the snapshotter name used for rootfs
+	string snapshotter = 6;
+
+	// SnapshotKey specifies the snapshot key to use for the container's root
+	// filesystem. When starting a task from this container, a caller should
+	// look up the mounts from the snapshot service and include those on the
+	// task create request.
+	//
+	// Snapshots referenced in this field will not be garbage collected.
+	//
+	// This field is set to empty when the rootfs is not a snapshot.
+	//
+	// This field may be updated.
+	string snapshot_key = 7;
+
+	// CreatedAt is the time the container was first created.
+	google.protobuf.Timestamp created_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// UpdatedAt is the last time the container was mutated.
+	google.protobuf.Timestamp updated_at = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// Extensions allow clients to provide zero or more blobs that are directly
+	// associated with the container. One may provide protobuf, json, or other
+	// encoding formats. The primary use of this is to further decorate the
+	// container object with fields that may be specific to a client integration.
+	//
+	// The key portion of this map should identify a "name" for the extension
+	// that should be unique against other extensions. When updating extension
+	// data, one should only update the specified extension using field paths
+	// to select a specific map key.
+	map<string, google.protobuf.Any> extensions = 10 [(gogoproto.nullable) = false];
+}
+
+message GetContainerRequest {
+	string id = 1;
+}
+
+message GetContainerResponse {
+	Container container = 1 [(gogoproto.nullable) = false];
+}
+
+message ListContainersRequest {
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, containers that match the following will be
+	// returned:
+	//
+	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
+	repeated string filters = 1;
+}
+
+message ListContainersResponse {
+	repeated Container containers = 1 [(gogoproto.nullable) = false];
+}
+
+message CreateContainerRequest {
+	Container container = 1 [(gogoproto.nullable) = false];
+}
+
+message CreateContainerResponse {
+	Container container = 1 [(gogoproto.nullable) = false];
+}
+
+// UpdateContainerRequest updates the metadata on one or more container.
+//
+// The operation should follow semantics described in
+// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
+// unless otherwise qualified.
+message UpdateContainerRequest {
+	// Container provides the target values, as declared by the mask, for the update.
+	//
+	// The ID field must be set.
+	Container container = 1 [(gogoproto.nullable) = false];
+
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	google.protobuf.FieldMask update_mask = 2;
+}
+
+message UpdateContainerResponse {
+	Container container = 1 [(gogoproto.nullable) = false];
+}
+
+message DeleteContainerRequest {
+	string id = 1;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
new file mode 100644
index 0000000..c9b76b5
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.pb.go
@@ -0,0 +1,4463 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/content/v1/content.proto
+// DO NOT EDIT!
+
+/*
+	Package content is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/services/content/v1/content.proto
+
+	It has these top-level messages:
+		Info
+		InfoRequest
+		InfoResponse
+		UpdateRequest
+		UpdateResponse
+		ListContentRequest
+		ListContentResponse
+		DeleteContentRequest
+		ReadContentRequest
+		ReadContentResponse
+		Status
+		StatusRequest
+		StatusResponse
+		ListStatusesRequest
+		ListStatusesResponse
+		WriteContentRequest
+		WriteContentResponse
+		AbortRequest
+*/
+package content
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/gogo/protobuf/types"
+import _ "github.com/gogo/protobuf/types"
+import google_protobuf3 "github.com/golang/protobuf/ptypes/empty"
+
+import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
+import time "time"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// WriteAction defines the behavior of a WriteRequest.
+type WriteAction int32
+
+const (
+	// WriteActionStat instructs the writer to return the current status while
+	// holding the lock on the write.
+	WriteActionStat WriteAction = 0
+	// WriteActionWrite sets the action for the write request to write data.
+	//
+	// Any data included will be written at the provided offset. The
+	// transaction will be left open for further writes.
+	//
+	// This is the default.
+	WriteActionWrite WriteAction = 1
+	// WriteActionCommit will write any outstanding data in the message and
+	// commit the write, storing it under the digest.
+	//
+	// This can be used in a single message to send the data, verify it and
+	// commit it.
+	//
+	// This action will always terminate the write.
+	WriteActionCommit WriteAction = 2
+)
+
+var WriteAction_name = map[int32]string{
+	0: "STAT",
+	1: "WRITE",
+	2: "COMMIT",
+}
+var WriteAction_value = map[string]int32{
+	"STAT":   0,
+	"WRITE":  1,
+	"COMMIT": 2,
+}
+
+func (x WriteAction) String() string {
+	return proto.EnumName(WriteAction_name, int32(x))
+}
+func (WriteAction) EnumDescriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} }
+
+type Info struct {
+	// Digest is the hash identity of the blob.
+	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+	// Size is the total number of bytes in the blob.
+	Size_ int64 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
+	// CreatedAt provides the time at which the blob was committed.
+	CreatedAt time.Time `protobuf:"bytes,3,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
+	// UpdatedAt provides the time the info was last updated.
+	UpdatedAt time.Time `protobuf:"bytes,4,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	Labels map[string]string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *Info) Reset()                    { *m = Info{} }
+func (*Info) ProtoMessage()               {}
+func (*Info) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} }
+
+type InfoRequest struct {
+	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+}
+
+func (m *InfoRequest) Reset()                    { *m = InfoRequest{} }
+func (*InfoRequest) ProtoMessage()               {}
+func (*InfoRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{1} }
+
+type InfoResponse struct {
+	Info Info `protobuf:"bytes,1,opt,name=info" json:"info"`
+}
+
+func (m *InfoResponse) Reset()                    { *m = InfoResponse{} }
+func (*InfoResponse) ProtoMessage()               {}
+func (*InfoResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{2} }
+
+type UpdateRequest struct {
+	Info Info `protobuf:"bytes,1,opt,name=info" json:"info"`
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	//
+	// In info, Digest, Size, and CreatedAt are immutable,
+	// other field may be updated using this mask.
+	// If no mask is provided, all mutable field are updated.
+	UpdateMask *google_protobuf1.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+}
+
+func (m *UpdateRequest) Reset()                    { *m = UpdateRequest{} }
+func (*UpdateRequest) ProtoMessage()               {}
+func (*UpdateRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{3} }
+
+type UpdateResponse struct {
+	Info Info `protobuf:"bytes,1,opt,name=info" json:"info"`
+}
+
+func (m *UpdateResponse) Reset()                    { *m = UpdateResponse{} }
+func (*UpdateResponse) ProtoMessage()               {}
+func (*UpdateResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{4} }
+
+type ListContentRequest struct {
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, containers that match the following will be
+	// returned:
+	//
+	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
+	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+}
+
+func (m *ListContentRequest) Reset()                    { *m = ListContentRequest{} }
+func (*ListContentRequest) ProtoMessage()               {}
+func (*ListContentRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{5} }
+
+type ListContentResponse struct {
+	Info []Info `protobuf:"bytes,1,rep,name=info" json:"info"`
+}
+
+func (m *ListContentResponse) Reset()                    { *m = ListContentResponse{} }
+func (*ListContentResponse) ProtoMessage()               {}
+func (*ListContentResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{6} }
+
+type DeleteContentRequest struct {
+	// Digest specifies which content to delete.
+	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+}
+
+func (m *DeleteContentRequest) Reset()                    { *m = DeleteContentRequest{} }
+func (*DeleteContentRequest) ProtoMessage()               {}
+func (*DeleteContentRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{7} }
+
+// ReadContentRequest defines the fields that make up a request to read a portion of
+// data from a stored object.
+type ReadContentRequest struct {
+	// Digest is the hash identity to read.
+	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+	// Offset specifies the number of bytes from the start at which to begin
+	// the read. If zero or less, the read will be from the start. This uses
+	// standard zero-indexed semantics.
+	Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
+	// size is the total size of the read. If zero, the entire blob will be
+	// returned by the service.
+	Size_ int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+}
+
+func (m *ReadContentRequest) Reset()                    { *m = ReadContentRequest{} }
+func (*ReadContentRequest) ProtoMessage()               {}
+func (*ReadContentRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{8} }
+
+// ReadContentResponse carries byte data for a read request.
+type ReadContentResponse struct {
+	Offset int64  `protobuf:"varint,1,opt,name=offset,proto3" json:"offset,omitempty"`
+	Data   []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+}
+
+func (m *ReadContentResponse) Reset()                    { *m = ReadContentResponse{} }
+func (*ReadContentResponse) ProtoMessage()               {}
+func (*ReadContentResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{9} }
+
+type Status struct {
+	StartedAt time.Time                                  `protobuf:"bytes,1,opt,name=started_at,json=startedAt,stdtime" json:"started_at"`
+	UpdatedAt time.Time                                  `protobuf:"bytes,2,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	Ref       string                                     `protobuf:"bytes,3,opt,name=ref,proto3" json:"ref,omitempty"`
+	Offset    int64                                      `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+	Total     int64                                      `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"`
+	Expected  github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"`
+}
+
+func (m *Status) Reset()                    { *m = Status{} }
+func (*Status) ProtoMessage()               {}
+func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{10} }
+
+type StatusRequest struct {
+	Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"`
+}
+
+func (m *StatusRequest) Reset()                    { *m = StatusRequest{} }
+func (*StatusRequest) ProtoMessage()               {}
+func (*StatusRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{11} }
+
+type StatusResponse struct {
+	Status *Status `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"`
+}
+
+func (m *StatusResponse) Reset()                    { *m = StatusResponse{} }
+func (*StatusResponse) ProtoMessage()               {}
+func (*StatusResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{12} }
+
+type ListStatusesRequest struct {
+	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+}
+
+func (m *ListStatusesRequest) Reset()                    { *m = ListStatusesRequest{} }
+func (*ListStatusesRequest) ProtoMessage()               {}
+func (*ListStatusesRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{13} }
+
+type ListStatusesResponse struct {
+	Statuses []Status `protobuf:"bytes,1,rep,name=statuses" json:"statuses"`
+}
+
+func (m *ListStatusesResponse) Reset()                    { *m = ListStatusesResponse{} }
+func (*ListStatusesResponse) ProtoMessage()               {}
+func (*ListStatusesResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{14} }
+
+// WriteContentRequest writes data to the request ref at offset.
+type WriteContentRequest struct {
+	// Action sets the behavior of the write.
+	//
+	// When this is a write and the ref is not yet allocated, the ref will be
+	// allocated and the data will be written at offset.
+	//
+	// If the action is write and the ref is allocated, it will accept data to
+	// an offset that has not yet been written.
+	//
+	// If the action is write and there is no data, the current write status
+	// will be returned. This works differently from status because the stream
+	// holds a lock.
+	Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"`
+	// Ref identifies the pre-commit object to write to.
+	Ref string `protobuf:"bytes,2,opt,name=ref,proto3" json:"ref,omitempty"`
+	// Total can be set to have the service validate the total size of the
+	// committed content.
+	//
+	// The latest value before or with the commit action message will be use to
+	// validate the content. If the offset overflows total, the service may
+	// report an error. It is only required on one message for the write.
+	//
+	// If the value is zero or less, no validation of the final content will be
+	// performed.
+	Total int64 `protobuf:"varint,3,opt,name=total,proto3" json:"total,omitempty"`
+	// Expected can be set to have the service validate the final content against
+	// the provided digest.
+	//
+	// If the digest is already present in the object store, an AlreadyExists
+	// error will be returned.
+	//
+	// Only the latest version will be used to check the content against the
+	// digest. It is only required to include it on a single message, before or
+	// with the commit action message.
+	Expected github_com_opencontainers_go_digest.Digest `protobuf:"bytes,4,opt,name=expected,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"expected"`
+	// Offset specifies the number of bytes from the start at which to begin
+	// the write. For most implementations, this means from the start of the
+	// file. This uses standard, zero-indexed semantics.
+	//
+	// If the action is write, the remote may remove all previously written
+	// data after the offset. Implementations may support arbitrary offsets but
+	// MUST support reseting this value to zero with a write. If an
+	// implementation does not support a write at a particular offset, an
+	// OutOfRange error must be returned.
+	Offset int64 `protobuf:"varint,5,opt,name=offset,proto3" json:"offset,omitempty"`
+	// Data is the actual bytes to be written.
+	//
+	// If this is empty and the message is not a commit, a response will be
+	// returned with the current write state.
+	Data []byte `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"`
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	Labels map[string]string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *WriteContentRequest) Reset()                    { *m = WriteContentRequest{} }
+func (*WriteContentRequest) ProtoMessage()               {}
+func (*WriteContentRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{15} }
+
+// WriteContentResponse is returned on the culmination of a write call.
+type WriteContentResponse struct {
+	// Action contains the action for the final message of the stream. A writer
+	// should confirm that they match the intended result.
+	Action WriteAction `protobuf:"varint,1,opt,name=action,proto3,enum=containerd.services.content.v1.WriteAction" json:"action,omitempty"`
+	// StartedAt provides the time at which the write began.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	StartedAt time.Time `protobuf:"bytes,2,opt,name=started_at,json=startedAt,stdtime" json:"started_at"`
+	// UpdatedAt provides the last time of a successful write.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	UpdatedAt time.Time `protobuf:"bytes,3,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	// Offset is the current committed size for the write.
+	Offset int64 `protobuf:"varint,4,opt,name=offset,proto3" json:"offset,omitempty"`
+	// Total provides the current, expected total size of the write.
+	//
+	// We include this to provide consistency with the Status structure on the
+	// client writer.
+	//
+	// This is only valid on the Stat and Commit response.
+	Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"`
+	// Digest, if present, includes the digest up to the currently committed
+	// bytes. If action is commit, this field will be set. It is implementation
+	// defined if this is set for other actions.
+	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,6,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+}
+
+func (m *WriteContentResponse) Reset()                    { *m = WriteContentResponse{} }
+func (*WriteContentResponse) ProtoMessage()               {}
+func (*WriteContentResponse) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{16} }
+
+type AbortRequest struct {
+	Ref string `protobuf:"bytes,1,opt,name=ref,proto3" json:"ref,omitempty"`
+}
+
+func (m *AbortRequest) Reset()                    { *m = AbortRequest{} }
+func (*AbortRequest) ProtoMessage()               {}
+func (*AbortRequest) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{17} }
+
+func init() {
+	proto.RegisterType((*Info)(nil), "containerd.services.content.v1.Info")
+	proto.RegisterType((*InfoRequest)(nil), "containerd.services.content.v1.InfoRequest")
+	proto.RegisterType((*InfoResponse)(nil), "containerd.services.content.v1.InfoResponse")
+	proto.RegisterType((*UpdateRequest)(nil), "containerd.services.content.v1.UpdateRequest")
+	proto.RegisterType((*UpdateResponse)(nil), "containerd.services.content.v1.UpdateResponse")
+	proto.RegisterType((*ListContentRequest)(nil), "containerd.services.content.v1.ListContentRequest")
+	proto.RegisterType((*ListContentResponse)(nil), "containerd.services.content.v1.ListContentResponse")
+	proto.RegisterType((*DeleteContentRequest)(nil), "containerd.services.content.v1.DeleteContentRequest")
+	proto.RegisterType((*ReadContentRequest)(nil), "containerd.services.content.v1.ReadContentRequest")
+	proto.RegisterType((*ReadContentResponse)(nil), "containerd.services.content.v1.ReadContentResponse")
+	proto.RegisterType((*Status)(nil), "containerd.services.content.v1.Status")
+	proto.RegisterType((*StatusRequest)(nil), "containerd.services.content.v1.StatusRequest")
+	proto.RegisterType((*StatusResponse)(nil), "containerd.services.content.v1.StatusResponse")
+	proto.RegisterType((*ListStatusesRequest)(nil), "containerd.services.content.v1.ListStatusesRequest")
+	proto.RegisterType((*ListStatusesResponse)(nil), "containerd.services.content.v1.ListStatusesResponse")
+	proto.RegisterType((*WriteContentRequest)(nil), "containerd.services.content.v1.WriteContentRequest")
+	proto.RegisterType((*WriteContentResponse)(nil), "containerd.services.content.v1.WriteContentResponse")
+	proto.RegisterType((*AbortRequest)(nil), "containerd.services.content.v1.AbortRequest")
+	proto.RegisterEnum("containerd.services.content.v1.WriteAction", WriteAction_name, WriteAction_value)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Content service
+
+type ContentClient interface {
+	// Info returns information about a committed object.
+	//
+	// This call can be used for getting the size of content and checking for
+	// existence.
+	Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error)
+	// Update updates content metadata.
+	//
+	// This call can be used to manage the mutable content labels. The
+	// immutable metadata such as digest, size, and committed at cannot
+	// be updated.
+	Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error)
+	// List streams the entire set of content as Info objects and closes the
+	// stream.
+	//
+	// Typically, this will yield a large response, chunked into messages.
+	// Clients should make provisions to ensure they can handle the entire data
+	// set.
+	List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error)
+	// Delete will delete the referenced object.
+	Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error)
+	// Read allows one to read an object based on the offset into the content.
+	//
+	// The requested data may be returned in one or more messages.
+	Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error)
+	// Status returns the status for a single reference.
+	Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
+	// ListStatuses returns the status of ongoing object ingestions, started via
+	// Write.
+	//
+	// Only those matching the regular expression will be provided in the
+	// response. If the provided regular expression is empty, all ingestions
+	// will be provided.
+	ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error)
+	// Write begins or resumes writes to a resource identified by a unique ref.
+	// Only one active stream may exist at a time for each ref.
+	//
+	// Once a write stream has started, it may only write to a single ref, thus
+	// once a stream is started, the ref may be ommitted on subsequent writes.
+	//
+	// For any write transaction represented by a ref, only a single write may
+	// be made to a given offset. If overlapping writes occur, it is an error.
+	// Writes should be sequential and implementations may throw an error if
+	// this is required.
+	//
+	// If expected_digest is set and already part of the content store, the
+	// write will fail.
+	//
+	// When completed, the commit flag should be set to true. If expected size
+	// or digest is set, the content will be validated against those values.
+	Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error)
+	// Abort cancels the ongoing write named in the request. Any resources
+	// associated with the write will be collected.
+	Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error)
+}
+
+type contentClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewContentClient(cc *grpc.ClientConn) ContentClient {
+	return &contentClient{cc}
+}
+
+func (c *contentClient) Info(ctx context.Context, in *InfoRequest, opts ...grpc.CallOption) (*InfoResponse, error) {
+	out := new(InfoResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Info", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *contentClient) Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error) {
+	out := new(UpdateResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Update", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *contentClient) List(ctx context.Context, in *ListContentRequest, opts ...grpc.CallOption) (Content_ListClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_Content_serviceDesc.Streams[0], c.cc, "/containerd.services.content.v1.Content/List", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &contentListClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Content_ListClient interface {
+	Recv() (*ListContentResponse, error)
+	grpc.ClientStream
+}
+
+type contentListClient struct {
+	grpc.ClientStream
+}
+
+func (x *contentListClient) Recv() (*ListContentResponse, error) {
+	m := new(ListContentResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *contentClient) Delete(ctx context.Context, in *DeleteContentRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) {
+	out := new(google_protobuf3.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Delete", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *contentClient) Read(ctx context.Context, in *ReadContentRequest, opts ...grpc.CallOption) (Content_ReadClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_Content_serviceDesc.Streams[1], c.cc, "/containerd.services.content.v1.Content/Read", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &contentReadClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Content_ReadClient interface {
+	Recv() (*ReadContentResponse, error)
+	grpc.ClientStream
+}
+
+type contentReadClient struct {
+	grpc.ClientStream
+}
+
+func (x *contentReadClient) Recv() (*ReadContentResponse, error) {
+	m := new(ReadContentResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *contentClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
+	out := new(StatusResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Status", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *contentClient) ListStatuses(ctx context.Context, in *ListStatusesRequest, opts ...grpc.CallOption) (*ListStatusesResponse, error) {
+	out := new(ListStatusesResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/ListStatuses", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *contentClient) Write(ctx context.Context, opts ...grpc.CallOption) (Content_WriteClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_Content_serviceDesc.Streams[2], c.cc, "/containerd.services.content.v1.Content/Write", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &contentWriteClient{stream}
+	return x, nil
+}
+
+type Content_WriteClient interface {
+	Send(*WriteContentRequest) error
+	Recv() (*WriteContentResponse, error)
+	grpc.ClientStream
+}
+
+type contentWriteClient struct {
+	grpc.ClientStream
+}
+
+func (x *contentWriteClient) Send(m *WriteContentRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *contentWriteClient) Recv() (*WriteContentResponse, error) {
+	m := new(WriteContentResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *contentClient) Abort(ctx context.Context, in *AbortRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) {
+	out := new(google_protobuf3.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.content.v1.Content/Abort", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for Content service
+
+type ContentServer interface {
+	// Info returns information about a committed object.
+	//
+	// This call can be used for getting the size of content and checking for
+	// existence.
+	Info(context.Context, *InfoRequest) (*InfoResponse, error)
+	// Update updates content metadata.
+	//
+	// This call can be used to manage the mutable content labels. The
+	// immutable metadata such as digest, size, and committed at cannot
+	// be updated.
+	Update(context.Context, *UpdateRequest) (*UpdateResponse, error)
+	// List streams the entire set of content as Info objects and closes the
+	// stream.
+	//
+	// Typically, this will yield a large response, chunked into messages.
+	// Clients should make provisions to ensure they can handle the entire data
+	// set.
+	List(*ListContentRequest, Content_ListServer) error
+	// Delete will delete the referenced object.
+	Delete(context.Context, *DeleteContentRequest) (*google_protobuf3.Empty, error)
+	// Read allows one to read an object based on the offset into the content.
+	//
+	// The requested data may be returned in one or more messages.
+	Read(*ReadContentRequest, Content_ReadServer) error
+	// Status returns the status for a single reference.
+	Status(context.Context, *StatusRequest) (*StatusResponse, error)
+	// ListStatuses returns the status of ongoing object ingestions, started via
+	// Write.
+	//
+	// Only those matching the regular expression will be provided in the
+	// response. If the provided regular expression is empty, all ingestions
+	// will be provided.
+	ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error)
+	// Write begins or resumes writes to a resource identified by a unique ref.
+	// Only one active stream may exist at a time for each ref.
+	//
+	// Once a write stream has started, it may only write to a single ref, thus
+	// once a stream is started, the ref may be ommitted on subsequent writes.
+	//
+	// For any write transaction represented by a ref, only a single write may
+	// be made to a given offset. If overlapping writes occur, it is an error.
+	// Writes should be sequential and implementations may throw an error if
+	// this is required.
+	//
+	// If expected_digest is set and already part of the content store, the
+	// write will fail.
+	//
+	// When completed, the commit flag should be set to true. If expected size
+	// or digest is set, the content will be validated against those values.
+	Write(Content_WriteServer) error
+	// Abort cancels the ongoing write named in the request. Any resources
+	// associated with the write will be collected.
+	Abort(context.Context, *AbortRequest) (*google_protobuf3.Empty, error)
+}
+
+func RegisterContentServer(s *grpc.Server, srv ContentServer) {
+	s.RegisterService(&_Content_serviceDesc, srv)
+}
+
+func _Content_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(InfoRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContentServer).Info(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.content.v1.Content/Info",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContentServer).Info(ctx, req.(*InfoRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Content_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContentServer).Update(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.content.v1.Content/Update",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContentServer).Update(ctx, req.(*UpdateRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Content_List_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(ListContentRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(ContentServer).List(m, &contentListServer{stream})
+}
+
+type Content_ListServer interface {
+	Send(*ListContentResponse) error
+	grpc.ServerStream
+}
+
+type contentListServer struct {
+	grpc.ServerStream
+}
+
+func (x *contentListServer) Send(m *ListContentResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _Content_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteContentRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContentServer).Delete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.content.v1.Content/Delete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContentServer).Delete(ctx, req.(*DeleteContentRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Content_Read_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(ReadContentRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(ContentServer).Read(m, &contentReadServer{stream})
+}
+
+type Content_ReadServer interface {
+	Send(*ReadContentResponse) error
+	grpc.ServerStream
+}
+
+type contentReadServer struct {
+	grpc.ServerStream
+}
+
+func (x *contentReadServer) Send(m *ReadContentResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _Content_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(StatusRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContentServer).Status(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.content.v1.Content/Status",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContentServer).Status(ctx, req.(*StatusRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Content_ListStatuses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListStatusesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContentServer).ListStatuses(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.content.v1.Content/ListStatuses",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContentServer).ListStatuses(ctx, req.(*ListStatusesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Content_Write_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(ContentServer).Write(&contentWriteServer{stream})
+}
+
+type Content_WriteServer interface {
+	Send(*WriteContentResponse) error
+	Recv() (*WriteContentRequest, error)
+	grpc.ServerStream
+}
+
+type contentWriteServer struct {
+	grpc.ServerStream
+}
+
+func (x *contentWriteServer) Send(m *WriteContentResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *contentWriteServer) Recv() (*WriteContentRequest, error) {
+	m := new(WriteContentRequest)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func _Content_Abort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AbortRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ContentServer).Abort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.content.v1.Content/Abort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ContentServer).Abort(ctx, req.(*AbortRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Content_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "containerd.services.content.v1.Content",
+	HandlerType: (*ContentServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Info",
+			Handler:    _Content_Info_Handler,
+		},
+		{
+			MethodName: "Update",
+			Handler:    _Content_Update_Handler,
+		},
+		{
+			MethodName: "Delete",
+			Handler:    _Content_Delete_Handler,
+		},
+		{
+			MethodName: "Status",
+			Handler:    _Content_Status_Handler,
+		},
+		{
+			MethodName: "ListStatuses",
+			Handler:    _Content_ListStatuses_Handler,
+		},
+		{
+			MethodName: "Abort",
+			Handler:    _Content_Abort_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "List",
+			Handler:       _Content_List_Handler,
+			ServerStreams: true,
+		},
+		{
+			StreamName:    "Read",
+			Handler:       _Content_Read_Handler,
+			ServerStreams: true,
+		},
+		{
+			StreamName:    "Write",
+			Handler:       _Content_Write_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "github.com/containerd/containerd/api/services/content/v1/content.proto",
+}
+
+func (m *Info) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Info) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Digest) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
+		i += copy(dAtA[i:], m.Digest)
+	}
+	if m.Size_ != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Size_))
+	}
+	dAtA[i] = 0x1a
+	i++
+	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
+	n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n1
+	dAtA[i] = 0x22
+	i++
+	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+	n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n2
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x2a
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v)))
+			i = encodeVarintContent(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintContent(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintContent(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	return i, nil
+}
+
+func (m *InfoRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *InfoRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Digest) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
+		i += copy(dAtA[i:], m.Digest)
+	}
+	return i, nil
+}
+
+func (m *InfoResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *InfoResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintContent(dAtA, i, uint64(m.Info.Size()))
+	n3, err := m.Info.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n3
+	return i, nil
+}
+
+func (m *UpdateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintContent(dAtA, i, uint64(m.Info.Size()))
+	n4, err := m.Info.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n4
+	if m.UpdateMask != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.UpdateMask.Size()))
+		n5, err := m.UpdateMask.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n5
+	}
+	return i, nil
+}
+
+func (m *UpdateResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintContent(dAtA, i, uint64(m.Info.Size()))
+	n6, err := m.Info.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n6
+	return i, nil
+}
+
+func (m *ListContentRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListContentRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			dAtA[i] = 0xa
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	return i, nil
+}
+
+func (m *ListContentResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListContentResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Info) > 0 {
+		for _, msg := range m.Info {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintContent(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *DeleteContentRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteContentRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Digest) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
+		i += copy(dAtA[i:], m.Digest)
+	}
+	return i, nil
+}
+
+func (m *ReadContentRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ReadContentRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Digest) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
+		i += copy(dAtA[i:], m.Digest)
+	}
+	if m.Offset != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+	}
+	if m.Size_ != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Size_))
+	}
+	return i, nil
+}
+
+func (m *ReadContentResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ReadContentResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Offset != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+	}
+	if len(m.Data) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Data)))
+		i += copy(dAtA[i:], m.Data)
+	}
+	return i, nil
+}
+
+func (m *Status) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Status) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)))
+	n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n7
+	dAtA[i] = 0x12
+	i++
+	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+	n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n8
+	if len(m.Ref) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
+		i += copy(dAtA[i:], m.Ref)
+	}
+	if m.Offset != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+	}
+	if m.Total != 0 {
+		dAtA[i] = 0x28
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Total))
+	}
+	if len(m.Expected) > 0 {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Expected)))
+		i += copy(dAtA[i:], m.Expected)
+	}
+	return i, nil
+}
+
+func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Ref) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
+		i += copy(dAtA[i:], m.Ref)
+	}
+	return i, nil
+}
+
+func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Status != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Status.Size()))
+		n9, err := m.Status.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n9
+	}
+	return i, nil
+}
+
+func (m *ListStatusesRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListStatusesRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			dAtA[i] = 0xa
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	return i, nil
+}
+
+func (m *ListStatusesResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListStatusesResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Statuses) > 0 {
+		for _, msg := range m.Statuses {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintContent(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *WriteContentRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WriteContentRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Action != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Action))
+	}
+	if len(m.Ref) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
+		i += copy(dAtA[i:], m.Ref)
+	}
+	if m.Total != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Total))
+	}
+	if len(m.Expected) > 0 {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Expected)))
+		i += copy(dAtA[i:], m.Expected)
+	}
+	if m.Offset != 0 {
+		dAtA[i] = 0x28
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+	}
+	if len(m.Data) > 0 {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Data)))
+		i += copy(dAtA[i:], m.Data)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x3a
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v)))
+			i = encodeVarintContent(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintContent(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintContent(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	return i, nil
+}
+
+func (m *WriteContentResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WriteContentResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Action != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Action))
+	}
+	dAtA[i] = 0x12
+	i++
+	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)))
+	n10, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.StartedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n10
+	dAtA[i] = 0x1a
+	i++
+	i = encodeVarintContent(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+	n11, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n11
+	if m.Offset != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Offset))
+	}
+	if m.Total != 0 {
+		dAtA[i] = 0x28
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(m.Total))
+	}
+	if len(m.Digest) > 0 {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
+		i += copy(dAtA[i:], m.Digest)
+	}
+	return i, nil
+}
+
+func (m *AbortRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AbortRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Ref) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Ref)))
+		i += copy(dAtA[i:], m.Ref)
+	}
+	return i, nil
+}
+
+func encodeFixed64Content(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Content(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintContent(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Info) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Digest)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	if m.Size_ != 0 {
+		n += 1 + sovContent(uint64(m.Size_))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
+	n += 1 + l + sovContent(uint64(l))
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
+	n += 1 + l + sovContent(uint64(l))
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v)))
+			n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *InfoRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Digest)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	return n
+}
+
+func (m *InfoResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Info.Size()
+	n += 1 + l + sovContent(uint64(l))
+	return n
+}
+
+func (m *UpdateRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Info.Size()
+	n += 1 + l + sovContent(uint64(l))
+	if m.UpdateMask != nil {
+		l = m.UpdateMask.Size()
+		n += 1 + l + sovContent(uint64(l))
+	}
+	return n
+}
+
+func (m *UpdateResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Info.Size()
+	n += 1 + l + sovContent(uint64(l))
+	return n
+}
+
+func (m *ListContentRequest) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			l = len(s)
+			n += 1 + l + sovContent(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ListContentResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Info) > 0 {
+		for _, e := range m.Info {
+			l = e.Size()
+			n += 1 + l + sovContent(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeleteContentRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Digest)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	return n
+}
+
+func (m *ReadContentRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Digest)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	if m.Offset != 0 {
+		n += 1 + sovContent(uint64(m.Offset))
+	}
+	if m.Size_ != 0 {
+		n += 1 + sovContent(uint64(m.Size_))
+	}
+	return n
+}
+
+func (m *ReadContentResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.Offset != 0 {
+		n += 1 + sovContent(uint64(m.Offset))
+	}
+	l = len(m.Data)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	return n
+}
+
+func (m *Status) Size() (n int) {
+	var l int
+	_ = l
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)
+	n += 1 + l + sovContent(uint64(l))
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
+	n += 1 + l + sovContent(uint64(l))
+	l = len(m.Ref)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	if m.Offset != 0 {
+		n += 1 + sovContent(uint64(m.Offset))
+	}
+	if m.Total != 0 {
+		n += 1 + sovContent(uint64(m.Total))
+	}
+	l = len(m.Expected)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	return n
+}
+
+func (m *StatusRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Ref)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	return n
+}
+
+func (m *StatusResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.Status != nil {
+		l = m.Status.Size()
+		n += 1 + l + sovContent(uint64(l))
+	}
+	return n
+}
+
+func (m *ListStatusesRequest) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			l = len(s)
+			n += 1 + l + sovContent(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ListStatusesResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Statuses) > 0 {
+		for _, e := range m.Statuses {
+			l = e.Size()
+			n += 1 + l + sovContent(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *WriteContentRequest) Size() (n int) {
+	var l int
+	_ = l
+	if m.Action != 0 {
+		n += 1 + sovContent(uint64(m.Action))
+	}
+	l = len(m.Ref)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	if m.Total != 0 {
+		n += 1 + sovContent(uint64(m.Total))
+	}
+	l = len(m.Expected)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	if m.Offset != 0 {
+		n += 1 + sovContent(uint64(m.Offset))
+	}
+	l = len(m.Data)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovContent(uint64(len(k))) + 1 + len(v) + sovContent(uint64(len(v)))
+			n += mapEntrySize + 1 + sovContent(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *WriteContentResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.Action != 0 {
+		n += 1 + sovContent(uint64(m.Action))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.StartedAt)
+	n += 1 + l + sovContent(uint64(l))
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
+	n += 1 + l + sovContent(uint64(l))
+	if m.Offset != 0 {
+		n += 1 + sovContent(uint64(m.Offset))
+	}
+	if m.Total != 0 {
+		n += 1 + sovContent(uint64(m.Total))
+	}
+	l = len(m.Digest)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	return n
+}
+
+func (m *AbortRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Ref)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	return n
+}
+
+func sovContent(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozContent(x uint64) (n int) {
+	return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Info) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&Info{`,
+		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
+		`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
+		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *InfoRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&InfoRequest{`,
+		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *InfoResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&InfoResponse{`,
+		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateRequest{`,
+		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "google_protobuf1.FieldMask", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateResponse{`,
+		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListContentRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListContentRequest{`,
+		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListContentResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListContentResponse{`,
+		`Info:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Info), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeleteContentRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeleteContentRequest{`,
+		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ReadContentRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ReadContentRequest{`,
+		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
+		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
+		`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ReadContentResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ReadContentResponse{`,
+		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
+		`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Status) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Status{`,
+		`StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
+		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
+		`Total:` + fmt.Sprintf("%v", this.Total) + `,`,
+		`Expected:` + fmt.Sprintf("%v", this.Expected) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StatusRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StatusRequest{`,
+		`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StatusResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StatusResponse{`,
+		`Status:` + strings.Replace(fmt.Sprintf("%v", this.Status), "Status", "Status", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListStatusesRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListStatusesRequest{`,
+		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListStatusesResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListStatusesResponse{`,
+		`Statuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Statuses), "Status", "Status", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WriteContentRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&WriteContentRequest{`,
+		`Action:` + fmt.Sprintf("%v", this.Action) + `,`,
+		`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
+		`Total:` + fmt.Sprintf("%v", this.Total) + `,`,
+		`Expected:` + fmt.Sprintf("%v", this.Expected) + `,`,
+		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
+		`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WriteContentResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WriteContentResponse{`,
+		`Action:` + fmt.Sprintf("%v", this.Action) + `,`,
+		`StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Offset:` + fmt.Sprintf("%v", this.Offset) + `,`,
+		`Total:` + fmt.Sprintf("%v", this.Total) + `,`,
+		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *AbortRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&AbortRequest{`,
+		`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringContent(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Info) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Info: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
+			}
+			m.Size_ = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Size_ |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthContent
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowContent
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowContent
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthContent
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *InfoRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: InfoRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: InfoRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *InfoResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: InfoResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: InfoResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UpdateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UpdateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.UpdateMask == nil {
+				m.UpdateMask = &google_protobuf1.FieldMask{}
+			}
+			if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UpdateResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UpdateResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListContentRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListContentRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListContentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListContentResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListContentResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListContentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Info = append(m.Info, Info{})
+			if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteContentRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteContentRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteContentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ReadContentRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ReadContentRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ReadContentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+			}
+			m.Offset = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Offset |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
+			}
+			m.Size_ = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Size_ |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ReadContentResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ReadContentResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ReadContentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+			}
+			m.Offset = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Offset |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Status) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Status: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ref = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+			}
+			m.Offset = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Offset |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
+			}
+			m.Total = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Total |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StatusRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ref = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StatusResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Status == nil {
+				m.Status = &Status{}
+			}
+			if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListStatusesRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListStatusesRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListStatusesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListStatusesResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListStatusesResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListStatusesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Statuses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Statuses = append(m.Statuses, Status{})
+			if err := m.Statuses[len(m.Statuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WriteContentRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WriteContentRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WriteContentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+			}
+			m.Action = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Action |= (WriteAction(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ref = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
+			}
+			m.Total = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Total |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Expected", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Expected = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+			}
+			m.Offset = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Offset |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthContent
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowContent
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowContent
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthContent
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WriteContentResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WriteContentResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WriteContentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+			}
+			m.Action = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Action |= (WriteAction(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.StartedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Offset", wireType)
+			}
+			m.Offset = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Offset |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Total", wireType)
+			}
+			m.Total = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Total |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AbortRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AbortRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AbortRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ref = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipContent(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthContent
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowContent
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipContent(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowContent   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/content/v1/content.proto", fileDescriptorContent)
+}
+
+var fileDescriptorContent = []byte{
+	// 1079 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcd, 0x6f, 0x1b, 0x45,
+	0x14, 0xcf, 0x78, 0xed, 0x4d, 0xf2, 0x9c, 0x16, 0x33, 0x31, 0x95, 0xb5, 0x08, 0x7b, 0xbb, 0x42,
+	0xc8, 0x6a, 0xc9, 0x3a, 0x75, 0x7a, 0x00, 0x2a, 0x01, 0x8e, 0x9b, 0xaa, 0x41, 0x4d, 0x41, 0x5b,
+	0x97, 0x88, 0x5e, 0xca, 0xda, 0x1e, 0x9b, 0x55, 0x6c, 0xaf, 0xbb, 0x33, 0xb6, 0x08, 0x27, 0x2e,
+	0x48, 0x28, 0xea, 0x01, 0x71, 0xcf, 0x05, 0xf8, 0x2b, 0x38, 0x70, 0xce, 0x91, 0x23, 0xe2, 0xd0,
+	0xd2, 0xfc, 0x0f, 0xdc, 0xd1, 0xcc, 0xce, 0xda, 0xeb, 0x8f, 0xb0, 0xb6, 0xe3, 0x9e, 0xfc, 0x66,
+	0xf6, 0xfd, 0xde, 0xf7, 0xc7, 0x18, 0xee, 0x35, 0x1d, 0xf6, 0x4d, 0xaf, 0x6a, 0xd6, 0xdc, 0x76,
+	0xa1, 0xe6, 0x76, 0x98, 0xed, 0x74, 0x88, 0x57, 0x0f, 0x93, 0x76, 0xd7, 0x29, 0x50, 0xe2, 0xf5,
+	0x9d, 0x1a, 0xa1, 0xe2, 0x9e, 0x74, 0x58, 0xa1, 0x7f, 0x2b, 0x20, 0xcd, 0xae, 0xe7, 0x32, 0x17,
+	0x67, 0x87, 0x08, 0x33, 0xe0, 0x36, 0x03, 0x96, 0xfe, 0x2d, 0x2d, 0xdd, 0x74, 0x9b, 0xae, 0x60,
+	0x2d, 0x70, 0xca, 0x47, 0x69, 0x7a, 0xd3, 0x75, 0x9b, 0x2d, 0x52, 0x10, 0xa7, 0x6a, 0xaf, 0x51,
+	0x68, 0x38, 0xa4, 0x55, 0x7f, 0xda, 0xb6, 0xe9, 0x91, 0xe4, 0xc8, 0x8d, 0x73, 0x30, 0xa7, 0x4d,
+	0x28, 0xb3, 0xdb, 0x5d, 0xc9, 0xf0, 0xf6, 0x38, 0x03, 0x69, 0x77, 0xd9, 0xb1, 0xff, 0xd1, 0xf8,
+	0x37, 0x06, 0xf1, 0xfd, 0x4e, 0xc3, 0xc5, 0x9f, 0x81, 0x5a, 0x77, 0x9a, 0x84, 0xb2, 0x0c, 0xd2,
+	0x51, 0x7e, 0x7d, 0xb7, 0x78, 0xf6, 0x22, 0xb7, 0xf2, 0xf7, 0x8b, 0xdc, 0x8d, 0x90, 0xfb, 0x6e,
+	0x97, 0x74, 0x06, 0x5e, 0xd0, 0x42, 0xd3, 0xdd, 0xf2, 0x21, 0xe6, 0x5d, 0xf1, 0x63, 0x49, 0x09,
+	0x18, 0x43, 0x9c, 0x3a, 0xdf, 0x91, 0x4c, 0x4c, 0x47, 0x79, 0xc5, 0x12, 0x34, 0x2e, 0x03, 0xd4,
+	0x3c, 0x62, 0x33, 0x52, 0x7f, 0x6a, 0xb3, 0x8c, 0xa2, 0xa3, 0x7c, 0xb2, 0xa8, 0x99, 0xbe, 0x69,
+	0x66, 0x60, 0x9a, 0x59, 0x09, 0x6c, 0xdf, 0x5d, 0xe3, 0xfa, 0x7f, 0x7a, 0x99, 0x43, 0xd6, 0xba,
+	0xc4, 0x95, 0x18, 0x17, 0xd2, 0xeb, 0xd6, 0x03, 0x21, 0xf1, 0x79, 0x84, 0x48, 0x5c, 0x89, 0xe1,
+	0xfb, 0xa0, 0xb6, 0xec, 0x2a, 0x69, 0xd1, 0x4c, 0x42, 0x57, 0xf2, 0xc9, 0xe2, 0xb6, 0xf9, 0xff,
+	0x99, 0x31, 0x79, 0x7c, 0xcc, 0x07, 0x02, 0xb2, 0xd7, 0x61, 0xde, 0xb1, 0x25, 0xf1, 0xda, 0x87,
+	0x90, 0x0c, 0x5d, 0xe3, 0x14, 0x28, 0x47, 0xe4, 0xd8, 0x8f, 0x9f, 0xc5, 0x49, 0x9c, 0x86, 0x44,
+	0xdf, 0x6e, 0xf5, 0xfc, 0x48, 0xac, 0x5b, 0xfe, 0xe1, 0xa3, 0xd8, 0x07, 0xc8, 0xf8, 0x0a, 0x92,
+	0x5c, 0xac, 0x45, 0x9e, 0xf5, 0x78, 0xc4, 0x96, 0x18, 0x7d, 0xe3, 0x21, 0x6c, 0xf8, 0xa2, 0x69,
+	0xd7, 0xed, 0x50, 0x82, 0x3f, 0x86, 0xb8, 0xd3, 0x69, 0xb8, 0x42, 0x72, 0xb2, 0xf8, 0xee, 0x2c,
+	0xde, 0xee, 0xc6, 0xb9, 0x7e, 0x4b, 0xe0, 0x8c, 0xe7, 0x08, 0xae, 0x3c, 0x16, 0xd1, 0x0b, 0xac,
+	0xbd, 0xa4, 0x44, 0x7c, 0x07, 0x92, 0x7e, 0x3a, 0x44, 0x1d, 0x8b, 0xe0, 0x4c, 0xcb, 0xe3, 0x3d,
+	0x5e, 0xea, 0x07, 0x36, 0x3d, 0xb2, 0x64, 0xd6, 0x39, 0x6d, 0x7c, 0x01, 0x57, 0x03, 0x6b, 0x96,
+	0xe4, 0xa0, 0x09, 0xf8, 0x81, 0x43, 0x59, 0xd9, 0x67, 0x09, 0x9c, 0xcc, 0xc0, 0x6a, 0xc3, 0x69,
+	0x31, 0xe2, 0xd1, 0x0c, 0xd2, 0x95, 0xfc, 0xba, 0x15, 0x1c, 0x8d, 0xc7, 0xb0, 0x39, 0xc2, 0x3f,
+	0x61, 0x86, 0xb2, 0x90, 0x19, 0x55, 0x48, 0xdf, 0x25, 0x2d, 0xc2, 0xc8, 0x98, 0x21, 0xcb, 0xac,
+	0x8d, 0xe7, 0x08, 0xb0, 0x45, 0xec, 0xfa, 0xeb, 0x53, 0x81, 0xaf, 0x81, 0xea, 0x36, 0x1a, 0x94,
+	0x30, 0xd9, 0xfe, 0xf2, 0x34, 0x18, 0x0a, 0xca, 0x70, 0x28, 0x18, 0x25, 0xd8, 0x1c, 0xb1, 0x46,
+	0x46, 0x72, 0x28, 0x02, 0x8d, 0x8b, 0xa8, 0xdb, 0xcc, 0x16, 0x82, 0x37, 0x2c, 0x41, 0x1b, 0xbf,
+	0xc4, 0x40, 0x7d, 0xc4, 0x6c, 0xd6, 0xa3, 0x7c, 0x3a, 0x50, 0x66, 0x7b, 0x72, 0x3a, 0xa0, 0x79,
+	0xa6, 0x83, 0xc4, 0x4d, 0x8c, 0x98, 0xd8, 0x62, 0x23, 0x26, 0x05, 0x8a, 0x47, 0x1a, 0xc2, 0xd5,
+	0x75, 0x8b, 0x93, 0x21, 0x97, 0xe2, 0x23, 0x2e, 0xa5, 0x21, 0xc1, 0x5c, 0x66, 0xb7, 0x32, 0x09,
+	0x71, 0xed, 0x1f, 0xf0, 0x43, 0x58, 0x23, 0xdf, 0x76, 0x49, 0x8d, 0x91, 0x7a, 0x46, 0x5d, 0x38,
+	0x23, 0x03, 0x19, 0xc6, 0x75, 0xb8, 0xe2, 0xc7, 0x28, 0x48, 0xb8, 0x34, 0x10, 0x0d, 0x0c, 0xe4,
+	0x6d, 0x15, 0xb0, 0x0c, 0xea, 0x59, 0xa5, 0xe2, 0x46, 0x86, 0xf2, 0xbd, 0xa8, 0x8a, 0x96, 0x78,
+	0x89, 0x32, 0x0a, 0x7e, 0x9b, 0xf8, 0xb7, 0x84, 0x46, 0xf7, 0xd5, 0xd7, 0x90, 0x1e, 0x05, 0x48,
+	0x43, 0xee, 0xc3, 0x1a, 0x95, 0x77, 0xb2, 0xb9, 0x66, 0x34, 0x45, 0xb6, 0xd7, 0x00, 0x6d, 0xfc,
+	0xac, 0xc0, 0xe6, 0xa1, 0xe7, 0x4c, 0xb4, 0x58, 0x19, 0x54, 0xbb, 0xc6, 0x1c, 0xb7, 0x23, 0x5c,
+	0xbd, 0x5a, 0xbc, 0x19, 0x25, 0x5f, 0x08, 0x29, 0x09, 0x88, 0x25, 0xa1, 0x41, 0x4c, 0x63, 0xc3,
+	0xa4, 0x0f, 0x92, 0xab, 0x5c, 0x94, 0xdc, 0xf8, 0xe5, 0x93, 0x1b, 0x2a, 0xad, 0xc4, 0xd4, 0x6e,
+	0x51, 0x87, 0xdd, 0x82, 0x0f, 0x07, 0xbb, 0x6f, 0x55, 0x04, 0xf2, 0x93, 0x99, 0x1c, 0x1d, 0x8d,
+	0xd6, 0xb2, 0x57, 0xe1, 0xcb, 0x18, 0xa4, 0x47, 0xd5, 0xc8, 0xbc, 0x2f, 0x25, 0x2b, 0xa3, 0x43,
+	0x21, 0xb6, 0x8c, 0xa1, 0xa0, 0x2c, 0x36, 0x14, 0xe6, 0x1b, 0x01, 0xc3, 0x91, 0xac, 0x5e, 0x7a,
+	0xea, 0xeb, 0xb0, 0x51, 0xaa, 0xba, 0x1e, 0xbb, 0xb0, 0xfb, 0x6f, 0xfc, 0x80, 0x20, 0x19, 0x8a,
+	0x1e, 0x7e, 0x07, 0xe2, 0x8f, 0x2a, 0xa5, 0x4a, 0x6a, 0x45, 0xdb, 0x3c, 0x39, 0xd5, 0xdf, 0x08,
+	0x7d, 0xe2, 0x9d, 0x85, 0x73, 0x90, 0x38, 0xb4, 0xf6, 0x2b, 0x7b, 0x29, 0xa4, 0xa5, 0x4f, 0x4e,
+	0xf5, 0x54, 0xe8, 0xbb, 0x20, 0xf1, 0x75, 0x50, 0xcb, 0x9f, 0x1f, 0x1c, 0xec, 0x57, 0x52, 0x31,
+	0xed, 0xad, 0x93, 0x53, 0xfd, 0xcd, 0x10, 0x47, 0xd9, 0x6d, 0xb7, 0x1d, 0xa6, 0x6d, 0xfe, 0xf8,
+	0x6b, 0x76, 0xe5, 0xf7, 0xdf, 0xb2, 0x61, 0xbd, 0xc5, 0x3f, 0x56, 0x61, 0x55, 0x96, 0x01, 0xb6,
+	0xe5, 0xcb, 0xf4, 0xe6, 0x2c, 0x9b, 0x54, 0xba, 0xa6, 0xbd, 0x3f, 0x1b, 0xb3, 0xac, 0xb0, 0x26,
+	0xa8, 0xfe, 0x5b, 0x02, 0x6f, 0x45, 0xe1, 0x46, 0x5e, 0x40, 0x9a, 0x39, 0x2b, 0xbb, 0x54, 0xf4,
+	0x0c, 0xe2, 0x7c, 0xb4, 0xe1, 0x62, 0x14, 0x6e, 0xf2, 0x21, 0xa2, 0xed, 0xcc, 0x85, 0xf1, 0x15,
+	0x6e, 0x23, 0xfc, 0x25, 0xa8, 0xfe, 0x73, 0x02, 0xdf, 0x8e, 0x12, 0x30, 0xed, 0xd9, 0xa1, 0x5d,
+	0x9b, 0xa8, 0xef, 0x3d, 0xfe, 0xbf, 0x81, 0xbb, 0xc2, 0x77, 0x76, 0xb4, 0x2b, 0x93, 0xef, 0x8c,
+	0x68, 0x57, 0xa6, 0xbc, 0x06, 0xb6, 0x11, 0x4f, 0x93, 0x5c, 0xf1, 0x5b, 0x33, 0xee, 0xa0, 0x59,
+	0xd3, 0x34, 0xb6, 0xf2, 0x8e, 0x61, 0x23, 0xbc, 0x81, 0xf0, 0x4c, 0xa1, 0x1f, 0x5b, 0x70, 0xda,
+	0xed, 0xf9, 0x40, 0x52, 0x75, 0x1f, 0x12, 0x7e, 0xeb, 0xec, 0x2c, 0x30, 0x92, 0xa3, 0x75, 0x4e,
+	0x1b, 0xb0, 0x79, 0xb4, 0x8d, 0xf0, 0x01, 0x24, 0xc4, 0x6c, 0xc0, 0x91, 0x9d, 0x13, 0x1e, 0x21,
+	0x17, 0x55, 0xc7, 0xee, 0x93, 0xb3, 0x57, 0xd9, 0x95, 0xbf, 0x5e, 0x65, 0x57, 0xbe, 0x3f, 0xcf,
+	0xa2, 0xb3, 0xf3, 0x2c, 0xfa, 0xf3, 0x3c, 0x8b, 0xfe, 0x39, 0xcf, 0xa2, 0x27, 0x9f, 0x2e, 0xfa,
+	0x3f, 0xfa, 0x8e, 0x24, 0xab, 0xaa, 0xd0, 0xb5, 0xf3, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbf,
+	0xc1, 0xae, 0xf1, 0x92, 0x0f, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto
new file mode 100644
index 0000000..a0a41c4
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/content/v1/content.proto
@@ -0,0 +1,318 @@
+syntax = "proto3";
+
+package containerd.services.content.v1;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/empty.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/content/v1;content";
+
+// Content provides access to a content addressable storage system.
+service Content {
+	// Info returns information about a committed object.
+	//
+	// This call can be used for getting the size of content and checking for
+	// existence.
+	rpc Info(InfoRequest) returns (InfoResponse);
+
+	// Update updates content metadata.
+	//
+	// This call can be used to manage the mutable content labels. The
+	// immutable metadata such as digest, size, and committed at cannot
+	// be updated.
+	rpc Update(UpdateRequest) returns (UpdateResponse);
+
+	// List streams the entire set of content as Info objects and closes the
+	// stream.
+	//
+	// Typically, this will yield a large response, chunked into messages.
+	// Clients should make provisions to ensure they can handle the entire data
+	// set.
+	rpc List(ListContentRequest) returns (stream ListContentResponse);
+
+	// Delete will delete the referenced object.
+	rpc Delete(DeleteContentRequest) returns (google.protobuf.Empty);
+
+	// Read allows one to read an object based on the offset into the content.
+	//
+	// The requested data may be returned in one or more messages.
+	rpc Read(ReadContentRequest) returns (stream ReadContentResponse);
+
+	// Status returns the status for a single reference.
+	rpc Status(StatusRequest) returns (StatusResponse);
+
+	// ListStatuses returns the status of ongoing object ingestions, started via
+	// Write.
+	//
+	// Only those matching the regular expression will be provided in the
+	// response. If the provided regular expression is empty, all ingestions
+	// will be provided.
+	rpc ListStatuses(ListStatusesRequest) returns (ListStatusesResponse);
+
+	// Write begins or resumes writes to a resource identified by a unique ref.
+	// Only one active stream may exist at a time for each ref.
+	//
+	// Once a write stream has started, it may only write to a single ref, thus
+	// once a stream is started, the ref may be ommitted on subsequent writes.
+	//
+	// For any write transaction represented by a ref, only a single write may
+	// be made to a given offset. If overlapping writes occur, it is an error.
+	// Writes should be sequential and implementations may throw an error if
+	// this is required.
+	//
+	// If expected_digest is set and already part of the content store, the
+	// write will fail.
+	//
+	// When completed, the commit flag should be set to true. If expected size
+	// or digest is set, the content will be validated against those values.
+	rpc Write(stream WriteContentRequest) returns (stream WriteContentResponse);
+
+	// Abort cancels the ongoing write named in the request. Any resources
+	// associated with the write will be collected.
+	rpc Abort(AbortRequest) returns (google.protobuf.Empty);
+}
+
+message Info {
+	// Digest is the hash identity of the blob.
+	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+
+	// Size is the total number of bytes in the blob.
+	int64 size = 2;
+
+	// CreatedAt provides the time at which the blob was committed.
+	google.protobuf.Timestamp created_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// UpdatedAt provides the time the info was last updated.
+	google.protobuf.Timestamp updated_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	map<string, string> labels  = 5;
+}
+
+message InfoRequest {
+	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+}
+
+message InfoResponse {
+	Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message UpdateRequest {
+	Info info = 1 [(gogoproto.nullable) = false];
+
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	//
+	// In info, Digest, Size, and CreatedAt are immutable,
+	// other field may be updated using this mask.
+	// If no mask is provided, all mutable field are updated.
+	google.protobuf.FieldMask update_mask = 2;
+}
+
+message UpdateResponse {
+	Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message ListContentRequest {
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, containers that match the following will be
+	// returned:
+	//
+	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
+	repeated string filters = 1;
+}
+
+message ListContentResponse {
+	repeated Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message DeleteContentRequest {
+	// Digest specifies which content to delete.
+	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+}
+
+// ReadContentRequest defines the fields that make up a request to read a portion of
+// data from a stored object.
+message ReadContentRequest {
+	// Digest is the hash identity to read.
+	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+
+	// Offset specifies the number of bytes from the start at which to begin
+	// the read. If zero or less, the read will be from the start. This uses
+	// standard zero-indexed semantics.
+	int64 offset = 2;
+
+	// size is the total size of the read. If zero, the entire blob will be
+	// returned by the service.
+	int64 size = 3;
+}
+
+// ReadContentResponse carries byte data for a read request.
+message ReadContentResponse {
+	int64 offset = 1; // offset of the returned data
+	bytes data = 2; // actual data
+}
+
+message Status {
+	google.protobuf.Timestamp started_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	google.protobuf.Timestamp updated_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	string ref = 3;
+	int64 offset = 4;
+	int64 total = 5;
+	string expected = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+}
+
+
+message StatusRequest {
+	string ref = 1;
+}
+
+message StatusResponse {
+	Status status = 1;
+}
+
+message ListStatusesRequest {
+	repeated string filters = 1;
+}
+
+message ListStatusesResponse {
+	repeated Status statuses = 1 [(gogoproto.nullable) = false];
+}
+
+// WriteAction defines the behavior of a WriteRequest.
+enum WriteAction {
+	option (gogoproto.goproto_enum_prefix) = false;
+	option (gogoproto.enum_customname) = "WriteAction";
+
+	// WriteActionStat instructs the writer to return the current status while
+	// holding the lock on the write.
+	STAT = 0 [(gogoproto.enumvalue_customname) = "WriteActionStat"];
+
+	// WriteActionWrite sets the action for the write request to write data.
+	//
+	// Any data included will be written at the provided offset. The
+	// transaction will be left open for further writes.
+	//
+	// This is the default.
+	WRITE = 1 [(gogoproto.enumvalue_customname) = "WriteActionWrite"];
+
+	// WriteActionCommit will write any outstanding data in the message and
+	// commit the write, storing it under the digest.
+	//
+	// This can be used in a single message to send the data, verify it and
+	// commit it.
+	//
+	// This action will always terminate the write.
+	COMMIT = 2 [(gogoproto.enumvalue_customname) = "WriteActionCommit"];
+}
+
+// WriteContentRequest writes data to the request ref at offset.
+message WriteContentRequest {
+	// Action sets the behavior of the write.
+	//
+	// When this is a write and the ref is not yet allocated, the ref will be
+	// allocated and the data will be written at offset.
+	//
+	// If the action is write and the ref is allocated, it will accept data to
+	// an offset that has not yet been written.
+	//
+	// If the action is write and there is no data, the current write status
+	// will be returned. This works differently from status because the stream
+	// holds a lock.
+	WriteAction action = 1;
+
+	// Ref identifies the pre-commit object to write to.
+	string ref = 2;
+
+	// Total can be set to have the service validate the total size of the
+	// committed content.
+	//
+	// The latest value before or with the commit action message will be use to
+	// validate the content. If the offset overflows total, the service may
+	// report an error. It is only required on one message for the write.
+	//
+	// If the value is zero or less, no validation of the final content will be
+	// performed.
+	int64 total = 3;
+
+	// Expected can be set to have the service validate the final content against
+	// the provided digest.
+	//
+	// If the digest is already present in the object store, an AlreadyExists
+	// error will be returned.
+	//
+	// Only the latest version will be used to check the content against the
+	// digest. It is only required to include it on a single message, before or
+	// with the commit action message.
+	string expected = 4 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+
+	// Offset specifies the number of bytes from the start at which to begin
+	// the write. For most implementations, this means from the start of the
+	// file. This uses standard, zero-indexed semantics.
+	//
+	// If the action is write, the remote may remove all previously written
+	// data after the offset. Implementations may support arbitrary offsets but
+	// MUST support reseting this value to zero with a write. If an
+	// implementation does not support a write at a particular offset, an
+	// OutOfRange error must be returned.
+	int64 offset = 5;
+
+	// Data is the actual bytes to be written.
+	//
+	// If this is empty and the message is not a commit, a response will be
+	// returned with the current write state.
+	bytes data = 6;
+
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	map<string, string> labels  = 7;
+}
+
+// WriteContentResponse is returned on the culmination of a write call.
+message WriteContentResponse {
+	// Action contains the action for the final message of the stream. A writer
+	// should confirm that they match the intended result.
+	WriteAction action = 1;
+
+	// StartedAt provides the time at which the write began.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	google.protobuf.Timestamp started_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// UpdatedAt provides the last time of a successful write.
+	//
+	// This must be set for stat and commit write actions. All other write
+	// actions may omit this.
+	google.protobuf.Timestamp updated_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// Offset is the current committed size for the write.
+	int64 offset = 4;
+
+	// Total provides the current, expected total size of the write.
+	//
+	// We include this to provide consistency with the Status structure on the
+	// client writer.
+	//
+	// This is only valid on the Stat and Commit response.
+	int64 total = 5;
+
+	// Digest, if present, includes the digest up to the currently committed
+	// bytes. If action is commit, this field will be set. It is implementation
+	// defined if this is set for other actions.
+	string digest = 6 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+}
+
+message AbortRequest {
+	string ref = 1;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
new file mode 100644
index 0000000..b19a377
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.pb.go
@@ -0,0 +1,1268 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/diff/v1/diff.proto
+// DO NOT EDIT!
+
+/*
+	Package diff is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/services/diff/v1/diff.proto
+
+	It has these top-level messages:
+		ApplyRequest
+		ApplyResponse
+		DiffRequest
+		DiffResponse
+*/
+package diff
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import containerd_types "github.com/containerd/containerd/api/types"
+import containerd_types1 "github.com/containerd/containerd/api/types"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import strings "strings"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type ApplyRequest struct {
+	// Diff is the descriptor of the diff to be extracted
+	Diff   *containerd_types1.Descriptor `protobuf:"bytes,1,opt,name=diff" json:"diff,omitempty"`
+	Mounts []*containerd_types.Mount     `protobuf:"bytes,2,rep,name=mounts" json:"mounts,omitempty"`
+}
+
+func (m *ApplyRequest) Reset()                    { *m = ApplyRequest{} }
+func (*ApplyRequest) ProtoMessage()               {}
+func (*ApplyRequest) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{0} }
+
+type ApplyResponse struct {
+	// Applied is the descriptor for the object which was applied.
+	// If the input was a compressed blob then the result will be
+	// the descriptor for the uncompressed blob.
+	Applied *containerd_types1.Descriptor `protobuf:"bytes,1,opt,name=applied" json:"applied,omitempty"`
+}
+
+func (m *ApplyResponse) Reset()                    { *m = ApplyResponse{} }
+func (*ApplyResponse) ProtoMessage()               {}
+func (*ApplyResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{1} }
+
+type DiffRequest struct {
+	// Left are the mounts which represent the older copy
+	// in which is the base of the computed changes.
+	Left []*containerd_types.Mount `protobuf:"bytes,1,rep,name=left" json:"left,omitempty"`
+	// Right are the mounts which represents the newer copy
+	// in which changes from the left were made into.
+	Right []*containerd_types.Mount `protobuf:"bytes,2,rep,name=right" json:"right,omitempty"`
+	// MediaType is the media type descriptor for the created diff
+	// object
+	MediaType string `protobuf:"bytes,3,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+	// Ref identifies the pre-commit content store object. This
+	// reference can be used to get the status from the content store.
+	Ref string `protobuf:"bytes,4,opt,name=ref,proto3" json:"ref,omitempty"`
+	// Labels are the labels to apply to the generated content
+	// on content store commit.
+	Labels map[string]string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *DiffRequest) Reset()                    { *m = DiffRequest{} }
+func (*DiffRequest) ProtoMessage()               {}
+func (*DiffRequest) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{2} }
+
+type DiffResponse struct {
+	// Diff is the descriptor of the diff which can be applied
+	Diff *containerd_types1.Descriptor `protobuf:"bytes,3,opt,name=diff" json:"diff,omitempty"`
+}
+
+func (m *DiffResponse) Reset()                    { *m = DiffResponse{} }
+func (*DiffResponse) ProtoMessage()               {}
+func (*DiffResponse) Descriptor() ([]byte, []int) { return fileDescriptorDiff, []int{3} }
+
+func init() {
+	proto.RegisterType((*ApplyRequest)(nil), "containerd.services.diff.v1.ApplyRequest")
+	proto.RegisterType((*ApplyResponse)(nil), "containerd.services.diff.v1.ApplyResponse")
+	proto.RegisterType((*DiffRequest)(nil), "containerd.services.diff.v1.DiffRequest")
+	proto.RegisterType((*DiffResponse)(nil), "containerd.services.diff.v1.DiffResponse")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Diff service
+
+type DiffClient interface {
+	// Apply applies the content associated with the provided digests onto
+	// the provided mounts. Archive content will be extracted and
+	// decompressed if necessary.
+	Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error)
+	// Diff creates a diff between the given mounts and uploads the result
+	// to the content store.
+	Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error)
+}
+
+type diffClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewDiffClient(cc *grpc.ClientConn) DiffClient {
+	return &diffClient{cc}
+}
+
+func (c *diffClient) Apply(ctx context.Context, in *ApplyRequest, opts ...grpc.CallOption) (*ApplyResponse, error) {
+	out := new(ApplyResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Apply", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *diffClient) Diff(ctx context.Context, in *DiffRequest, opts ...grpc.CallOption) (*DiffResponse, error) {
+	out := new(DiffResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.diff.v1.Diff/Diff", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for Diff service
+
+type DiffServer interface {
+	// Apply applies the content associated with the provided digests onto
+	// the provided mounts. Archive content will be extracted and
+	// decompressed if necessary.
+	Apply(context.Context, *ApplyRequest) (*ApplyResponse, error)
+	// Diff creates a diff between the given mounts and uploads the result
+	// to the content store.
+	Diff(context.Context, *DiffRequest) (*DiffResponse, error)
+}
+
+func RegisterDiffServer(s *grpc.Server, srv DiffServer) {
+	s.RegisterService(&_Diff_serviceDesc, srv)
+}
+
+func _Diff_Apply_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ApplyRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(DiffServer).Apply(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.diff.v1.Diff/Apply",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(DiffServer).Apply(ctx, req.(*ApplyRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Diff_Diff_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DiffRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(DiffServer).Diff(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.diff.v1.Diff/Diff",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(DiffServer).Diff(ctx, req.(*DiffRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Diff_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "containerd.services.diff.v1.Diff",
+	HandlerType: (*DiffServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Apply",
+			Handler:    _Diff_Apply_Handler,
+		},
+		{
+			MethodName: "Diff",
+			Handler:    _Diff_Diff_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/containerd/containerd/api/services/diff/v1/diff.proto",
+}
+
+func (m *ApplyRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ApplyRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Diff != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintDiff(dAtA, i, uint64(m.Diff.Size()))
+		n1, err := m.Diff.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n1
+	}
+	if len(m.Mounts) > 0 {
+		for _, msg := range m.Mounts {
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintDiff(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *ApplyResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ApplyResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Applied != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintDiff(dAtA, i, uint64(m.Applied.Size()))
+		n2, err := m.Applied.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n2
+	}
+	return i, nil
+}
+
+func (m *DiffRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DiffRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Left) > 0 {
+		for _, msg := range m.Left {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintDiff(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if len(m.Right) > 0 {
+		for _, msg := range m.Right {
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintDiff(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if len(m.MediaType) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintDiff(dAtA, i, uint64(len(m.MediaType)))
+		i += copy(dAtA[i:], m.MediaType)
+	}
+	if len(m.Ref) > 0 {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintDiff(dAtA, i, uint64(len(m.Ref)))
+		i += copy(dAtA[i:], m.Ref)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x2a
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovDiff(uint64(len(k))) + 1 + len(v) + sovDiff(uint64(len(v)))
+			i = encodeVarintDiff(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintDiff(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintDiff(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	return i, nil
+}
+
+func (m *DiffResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DiffResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Diff != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintDiff(dAtA, i, uint64(m.Diff.Size()))
+		n3, err := m.Diff.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n3
+	}
+	return i, nil
+}
+
+func encodeFixed64Diff(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Diff(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintDiff(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *ApplyRequest) Size() (n int) {
+	var l int
+	_ = l
+	if m.Diff != nil {
+		l = m.Diff.Size()
+		n += 1 + l + sovDiff(uint64(l))
+	}
+	if len(m.Mounts) > 0 {
+		for _, e := range m.Mounts {
+			l = e.Size()
+			n += 1 + l + sovDiff(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ApplyResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.Applied != nil {
+		l = m.Applied.Size()
+		n += 1 + l + sovDiff(uint64(l))
+	}
+	return n
+}
+
+func (m *DiffRequest) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Left) > 0 {
+		for _, e := range m.Left {
+			l = e.Size()
+			n += 1 + l + sovDiff(uint64(l))
+		}
+	}
+	if len(m.Right) > 0 {
+		for _, e := range m.Right {
+			l = e.Size()
+			n += 1 + l + sovDiff(uint64(l))
+		}
+	}
+	l = len(m.MediaType)
+	if l > 0 {
+		n += 1 + l + sovDiff(uint64(l))
+	}
+	l = len(m.Ref)
+	if l > 0 {
+		n += 1 + l + sovDiff(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovDiff(uint64(len(k))) + 1 + len(v) + sovDiff(uint64(len(v)))
+			n += mapEntrySize + 1 + sovDiff(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *DiffResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.Diff != nil {
+		l = m.Diff.Size()
+		n += 1 + l + sovDiff(uint64(l))
+	}
+	return n
+}
+
+func sovDiff(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozDiff(x uint64) (n int) {
+	return sovDiff(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ApplyRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ApplyRequest{`,
+		`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
+		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_types.Mount", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ApplyResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ApplyResponse{`,
+		`Applied:` + strings.Replace(fmt.Sprintf("%v", this.Applied), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DiffRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&DiffRequest{`,
+		`Left:` + strings.Replace(fmt.Sprintf("%v", this.Left), "Mount", "containerd_types.Mount", 1) + `,`,
+		`Right:` + strings.Replace(fmt.Sprintf("%v", this.Right), "Mount", "containerd_types.Mount", 1) + `,`,
+		`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
+		`Ref:` + fmt.Sprintf("%v", this.Ref) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DiffResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DiffResponse{`,
+		`Diff:` + strings.Replace(fmt.Sprintf("%v", this.Diff), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringDiff(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *ApplyRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowDiff
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ApplyRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ApplyRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Diff", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthDiff
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Diff == nil {
+				m.Diff = &containerd_types1.Descriptor{}
+			}
+			if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthDiff
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Mounts = append(m.Mounts, &containerd_types.Mount{})
+			if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipDiff(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthDiff
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ApplyResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowDiff
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ApplyResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ApplyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Applied", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthDiff
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Applied == nil {
+				m.Applied = &containerd_types1.Descriptor{}
+			}
+			if err := m.Applied.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipDiff(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthDiff
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DiffRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowDiff
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DiffRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DiffRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Left", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthDiff
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Left = append(m.Left, &containerd_types.Mount{})
+			if err := m.Left[len(m.Left)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Right", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthDiff
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Right = append(m.Right, &containerd_types.Mount{})
+			if err := m.Right[len(m.Right)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthDiff
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MediaType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthDiff
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Ref = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthDiff
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthDiff
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowDiff
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowDiff
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthDiff
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipDiff(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthDiff
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DiffResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowDiff
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DiffResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DiffResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Diff", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthDiff
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Diff == nil {
+				m.Diff = &containerd_types1.Descriptor{}
+			}
+			if err := m.Diff.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipDiff(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthDiff
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipDiff(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowDiff
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowDiff
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthDiff
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowDiff
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipDiff(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthDiff = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowDiff   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/diff/v1/diff.proto", fileDescriptorDiff)
+}
+
+var fileDescriptorDiff = []byte{
+	// 454 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30,
+	0x14, 0x9f, 0xfb, 0x0f, 0xf5, 0x75, 0x48, 0xc8, 0x9a, 0x44, 0x14, 0x20, 0xaa, 0x7a, 0xea, 0x40,
+	0x38, 0xac, 0xa0, 0x09, 0xb6, 0xcb, 0x40, 0x43, 0x5c, 0xc6, 0x25, 0xda, 0x09, 0x24, 0x50, 0xda,
+	0xbc, 0x74, 0x16, 0x69, 0xec, 0xd9, 0x6e, 0xa5, 0xdc, 0xf8, 0x2e, 0x7c, 0x14, 0x2e, 0x3b, 0x72,
+	0xe4, 0x48, 0xfb, 0x49, 0x90, 0x9d, 0x14, 0x22, 0x21, 0x95, 0xc0, 0x29, 0x2f, 0xcf, 0xbf, 0x7f,
+	0xf6, 0xb3, 0xe1, 0x6c, 0xce, 0xcd, 0xd5, 0x72, 0xca, 0x66, 0x62, 0x11, 0xce, 0x44, 0x6e, 0x62,
+	0x9e, 0xa3, 0x4a, 0xea, 0x65, 0x2c, 0x79, 0xa8, 0x51, 0xad, 0xf8, 0x0c, 0x75, 0x98, 0xf0, 0x34,
+	0x0d, 0x57, 0x47, 0xee, 0xcb, 0xa4, 0x12, 0x46, 0xd0, 0x7b, 0xbf, 0xb1, 0x6c, 0x8b, 0x63, 0x6e,
+	0x7d, 0x75, 0xe4, 0x1f, 0xcc, 0xc5, 0x5c, 0x38, 0x5c, 0x68, 0xab, 0x92, 0xe2, 0x1f, 0x37, 0x32,
+	0x35, 0x85, 0x44, 0x1d, 0x2e, 0xc4, 0x32, 0x37, 0x15, 0xef, 0xf4, 0x1f, 0x78, 0x09, 0xea, 0x99,
+	0xe2, 0xd2, 0x08, 0x55, 0x92, 0x47, 0xd7, 0xb0, 0xff, 0x52, 0xca, 0xac, 0x88, 0xf0, 0x7a, 0x89,
+	0xda, 0xd0, 0x27, 0xd0, 0xb1, 0x29, 0x3d, 0x32, 0x24, 0xe3, 0xc1, 0xe4, 0x3e, 0xab, 0x6d, 0xc3,
+	0x29, 0xb0, 0xf3, 0x5f, 0x0a, 0x91, 0x43, 0xd2, 0x10, 0x7a, 0x2e, 0x8d, 0xf6, 0x5a, 0xc3, 0xf6,
+	0x78, 0x30, 0xb9, 0xfb, 0x27, 0xe7, 0xad, 0x5d, 0x8f, 0x2a, 0xd8, 0xe8, 0x0d, 0xdc, 0xae, 0x2c,
+	0xb5, 0x14, 0xb9, 0x46, 0x7a, 0x0c, 0xb7, 0x62, 0x29, 0x33, 0x8e, 0x49, 0x23, 0xdb, 0x2d, 0x78,
+	0xf4, 0xa5, 0x05, 0x83, 0x73, 0x9e, 0xa6, 0xdb, 0xec, 0x8f, 0xa0, 0x93, 0x61, 0x6a, 0x3c, 0xb2,
+	0x3b, 0x87, 0x03, 0xd1, 0xc7, 0xd0, 0x55, 0x7c, 0x7e, 0x65, 0xfe, 0x96, 0xba, 0x44, 0xd1, 0x07,
+	0x00, 0x0b, 0x4c, 0x78, 0xfc, 0xd1, 0xae, 0x79, 0xed, 0x21, 0x19, 0xf7, 0xa3, 0xbe, 0xeb, 0x5c,
+	0x16, 0x12, 0xe9, 0x1d, 0x68, 0x2b, 0x4c, 0xbd, 0x8e, 0xeb, 0xdb, 0x92, 0x5e, 0x40, 0x2f, 0x8b,
+	0xa7, 0x98, 0x69, 0xaf, 0xeb, 0x0c, 0x9e, 0xb1, 0x1d, 0x37, 0x82, 0xd5, 0xb6, 0xc1, 0x2e, 0x1c,
+	0xed, 0x75, 0x6e, 0x54, 0x11, 0x55, 0x1a, 0xfe, 0x0b, 0x18, 0xd4, 0xda, 0xd6, 0xee, 0x13, 0x16,
+	0xee, 0xb4, 0xfa, 0x91, 0x2d, 0xe9, 0x01, 0x74, 0x57, 0x71, 0xb6, 0x44, 0xaf, 0xe5, 0x7a, 0xe5,
+	0xcf, 0x49, 0xeb, 0x39, 0x19, 0x9d, 0xc1, 0x7e, 0xa9, 0x5e, 0x9d, 0xf6, 0x76, 0xc2, 0xed, 0xa6,
+	0x13, 0x9e, 0x7c, 0x25, 0xd0, 0xb1, 0x12, 0xf4, 0x03, 0x74, 0xdd, 0xe4, 0xe8, 0xe1, 0xce, 0xcd,
+	0xd4, 0x2f, 0x94, 0xff, 0xb0, 0x09, 0xb4, 0x8a, 0xf6, 0xbe, 0xf2, 0x19, 0x37, 0x3d, 0x2b, 0xff,
+	0xb0, 0x01, 0xb2, 0x14, 0x7f, 0x75, 0x79, 0xb3, 0x0e, 0xf6, 0xbe, 0xaf, 0x83, 0xbd, 0xcf, 0x9b,
+	0x80, 0xdc, 0x6c, 0x02, 0xf2, 0x6d, 0x13, 0x90, 0x1f, 0x9b, 0x80, 0xbc, 0x3b, 0xf9, 0xaf, 0xd7,
+	0x7e, 0x6a, 0xbf, 0xd3, 0x9e, 0x7b, 0x46, 0x4f, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x01,
+	0x51, 0xf0, 0x32, 0x04, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto
new file mode 100644
index 0000000..d1d2a82
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/diff/v1/diff.proto
@@ -0,0 +1,62 @@
+syntax = "proto3";
+
+package containerd.services.diff.v1;
+
+import "gogoproto/gogo.proto";
+import "github.com/containerd/containerd/api/types/mount.proto";
+import "github.com/containerd/containerd/api/types/descriptor.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/diff/v1;diff";
+
+// Diff service creates and applies diffs
+service Diff {
+	// Apply applies the content associated with the provided digests onto
+	// the provided mounts. Archive content will be extracted and
+	// decompressed if necessary.
+	rpc Apply(ApplyRequest) returns (ApplyResponse);
+
+	// Diff creates a diff between the given mounts and uploads the result
+	// to the content store.
+	rpc Diff(DiffRequest) returns (DiffResponse);
+}
+
+message ApplyRequest {
+	// Diff is the descriptor of the diff to be extracted
+	containerd.types.Descriptor diff = 1;
+
+	repeated containerd.types.Mount mounts = 2;
+}
+
+message ApplyResponse {
+	// Applied is the descriptor for the object which was applied.
+	// If the input was a compressed blob then the result will be
+	// the descriptor for the uncompressed blob.
+	containerd.types.Descriptor applied = 1;
+}
+
+message DiffRequest {
+	// Left are the mounts which represent the older copy
+	// in which is the base of the computed changes.
+	repeated containerd.types.Mount left = 1;
+
+	// Right are the mounts which represents the newer copy
+	// in which changes from the left were made into.
+	repeated containerd.types.Mount right = 2;
+
+	// MediaType is the media type descriptor for the created diff
+	// object
+	string media_type = 3;
+
+	// Ref identifies the pre-commit content store object. This
+	// reference can be used to get the status from the content store.
+	string ref = 4;
+
+	// Labels are the labels to apply to the generated content
+	// on content store commit.
+	map<string, string> labels = 5;
+}
+
+message DiffResponse {
+	// Diff is the descriptor of the diff which can be applied
+	containerd.types.Descriptor diff = 3;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go
new file mode 100644
index 0000000..420aba0
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go
@@ -0,0 +1,1262 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/events/v1/container.proto
+// DO NOT EDIT!
+
+/*
+	Package events is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/services/events/v1/container.proto
+		github.com/containerd/containerd/api/services/events/v1/content.proto
+		github.com/containerd/containerd/api/services/events/v1/events.proto
+		github.com/containerd/containerd/api/services/events/v1/image.proto
+		github.com/containerd/containerd/api/services/events/v1/namespace.proto
+		github.com/containerd/containerd/api/services/events/v1/snapshot.proto
+		github.com/containerd/containerd/api/services/events/v1/task.proto
+
+	It has these top-level messages:
+		ContainerCreate
+		ContainerUpdate
+		ContainerDelete
+		ContentDelete
+		PublishRequest
+		ForwardRequest
+		SubscribeRequest
+		Envelope
+		ImageCreate
+		ImageUpdate
+		ImageDelete
+		NamespaceCreate
+		NamespaceUpdate
+		NamespaceDelete
+		SnapshotPrepare
+		SnapshotCommit
+		SnapshotRemove
+		TaskCreate
+		TaskStart
+		TaskDelete
+		TaskIO
+		TaskExit
+		TaskOOM
+		TaskExecAdded
+		TaskExecStarted
+		TaskPaused
+		TaskResumed
+		TaskCheckpointed
+*/
+package events
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/gogo/protobuf/types"
+import _ "github.com/containerd/containerd/protobuf/plugin"
+
+import github_com_containerd_typeurl "github.com/containerd/typeurl"
+
+import strings "strings"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type ContainerCreate struct {
+	ID      string                   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Image   string                   `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"`
+	Runtime *ContainerCreate_Runtime `protobuf:"bytes,3,opt,name=runtime" json:"runtime,omitempty"`
+}
+
+func (m *ContainerCreate) Reset()                    { *m = ContainerCreate{} }
+func (*ContainerCreate) ProtoMessage()               {}
+func (*ContainerCreate) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{0} }
+
+type ContainerCreate_Runtime struct {
+	Name    string                `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Options *google_protobuf1.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *ContainerCreate_Runtime) Reset()      { *m = ContainerCreate_Runtime{} }
+func (*ContainerCreate_Runtime) ProtoMessage() {}
+func (*ContainerCreate_Runtime) Descriptor() ([]byte, []int) {
+	return fileDescriptorContainer, []int{0, 0}
+}
+
+type ContainerUpdate struct {
+	ID          string            `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Image       string            `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"`
+	Labels      map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	SnapshotKey string            `protobuf:"bytes,4,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"`
+}
+
+func (m *ContainerUpdate) Reset()                    { *m = ContainerUpdate{} }
+func (*ContainerUpdate) ProtoMessage()               {}
+func (*ContainerUpdate) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{1} }
+
+type ContainerDelete struct {
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (m *ContainerDelete) Reset()                    { *m = ContainerDelete{} }
+func (*ContainerDelete) ProtoMessage()               {}
+func (*ContainerDelete) Descriptor() ([]byte, []int) { return fileDescriptorContainer, []int{2} }
+
+func init() {
+	proto.RegisterType((*ContainerCreate)(nil), "containerd.services.events.v1.ContainerCreate")
+	proto.RegisterType((*ContainerCreate_Runtime)(nil), "containerd.services.events.v1.ContainerCreate.Runtime")
+	proto.RegisterType((*ContainerUpdate)(nil), "containerd.services.events.v1.ContainerUpdate")
+	proto.RegisterType((*ContainerDelete)(nil), "containerd.services.events.v1.ContainerDelete")
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *ContainerCreate) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "id":
+		return string(m.ID), len(m.ID) > 0
+	case "image":
+		return string(m.Image), len(m.Image) > 0
+	case "runtime":
+		// NOTE(stevvooe): This is probably not correct in many cases.
+		// We assume that the target message also implements the Field
+		// method, which isn't likely true in a lot of cases.
+		//
+		// If you have a broken build and have found this comment,
+		// you may be closer to a solution.
+		if m.Runtime == nil {
+			return "", false
+		}
+
+		return m.Runtime.Field(fieldpath[1:])
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *ContainerCreate_Runtime) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "name":
+		return string(m.Name), len(m.Name) > 0
+	case "options":
+		decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Options)
+		if err != nil {
+			return "", false
+		}
+
+		adaptor, ok := decoded.(interface {
+			Field([]string) (string, bool)
+		})
+		if !ok {
+			return "", false
+		}
+		return adaptor.Field(fieldpath[1:])
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *ContainerUpdate) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "id":
+		return string(m.ID), len(m.ID) > 0
+	case "image":
+		return string(m.Image), len(m.Image) > 0
+	case "labels":
+		// Labels fields have been special-cased by name. If this breaks,
+		// add better special casing to fieldpath plugin.
+		if len(m.Labels) == 0 {
+			return "", false
+		}
+		value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
+		return value, ok
+	case "snapshot_key":
+		return string(m.SnapshotKey), len(m.SnapshotKey) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *ContainerDelete) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "id":
+		return string(m.ID), len(m.ID) > 0
+	}
+	return "", false
+}
+func (m *ContainerCreate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerCreate) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContainer(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if len(m.Image) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintContainer(dAtA, i, uint64(len(m.Image)))
+		i += copy(dAtA[i:], m.Image)
+	}
+	if m.Runtime != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintContainer(dAtA, i, uint64(m.Runtime.Size()))
+		n1, err := m.Runtime.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n1
+	}
+	return i, nil
+}
+
+func (m *ContainerCreate_Runtime) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerCreate_Runtime) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContainer(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	if m.Options != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintContainer(dAtA, i, uint64(m.Options.Size()))
+		n2, err := m.Options.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n2
+	}
+	return i, nil
+}
+
+func (m *ContainerUpdate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerUpdate) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContainer(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if len(m.Image) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintContainer(dAtA, i, uint64(len(m.Image)))
+		i += copy(dAtA[i:], m.Image)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x1a
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovContainer(uint64(len(k))) + 1 + len(v) + sovContainer(uint64(len(v)))
+			i = encodeVarintContainer(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintContainer(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintContainer(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	if len(m.SnapshotKey) > 0 {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintContainer(dAtA, i, uint64(len(m.SnapshotKey)))
+		i += copy(dAtA[i:], m.SnapshotKey)
+	}
+	return i, nil
+}
+
+func (m *ContainerDelete) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContainerDelete) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContainer(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	return i, nil
+}
+
+func encodeFixed64Container(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Container(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintContainer(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *ContainerCreate) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovContainer(uint64(l))
+	}
+	l = len(m.Image)
+	if l > 0 {
+		n += 1 + l + sovContainer(uint64(l))
+	}
+	if m.Runtime != nil {
+		l = m.Runtime.Size()
+		n += 1 + l + sovContainer(uint64(l))
+	}
+	return n
+}
+
+func (m *ContainerCreate_Runtime) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovContainer(uint64(l))
+	}
+	if m.Options != nil {
+		l = m.Options.Size()
+		n += 1 + l + sovContainer(uint64(l))
+	}
+	return n
+}
+
+func (m *ContainerUpdate) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovContainer(uint64(l))
+	}
+	l = len(m.Image)
+	if l > 0 {
+		n += 1 + l + sovContainer(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovContainer(uint64(len(k))) + 1 + len(v) + sovContainer(uint64(len(v)))
+			n += mapEntrySize + 1 + sovContainer(uint64(mapEntrySize))
+		}
+	}
+	l = len(m.SnapshotKey)
+	if l > 0 {
+		n += 1 + l + sovContainer(uint64(l))
+	}
+	return n
+}
+
+func (m *ContainerDelete) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovContainer(uint64(l))
+	}
+	return n
+}
+
+func sovContainer(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozContainer(x uint64) (n int) {
+	return sovContainer(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ContainerCreate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerCreate{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
+		`Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "ContainerCreate_Runtime", "ContainerCreate_Runtime", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerCreate_Runtime) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerCreate_Runtime{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerUpdate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&ContainerUpdate{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`SnapshotKey:` + fmt.Sprintf("%v", this.SnapshotKey) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ContainerDelete) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContainerDelete{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringContainer(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *ContainerCreate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContainer
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerCreate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerCreate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContainer
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContainer
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Image = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContainer
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Runtime == nil {
+				m.Runtime = &ContainerCreate_Runtime{}
+			}
+			if err := m.Runtime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContainer(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContainer
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerCreate_Runtime) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContainer
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Runtime: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Runtime: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContainer
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContainer
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Options == nil {
+				m.Options = &google_protobuf1.Any{}
+			}
+			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContainer(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContainer
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerUpdate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContainer
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerUpdate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContainer
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContainer
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Image = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthContainer
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthContainer
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowContainer
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowContainer
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthContainer
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SnapshotKey", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContainer
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SnapshotKey = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContainer(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContainer
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ContainerDelete) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContainer
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContainerDelete: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContainerDelete: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContainer
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContainer(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContainer
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipContainer(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowContainer
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowContainer
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthContainer
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowContainer
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipContainer(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthContainer = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowContainer   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/container.proto", fileDescriptorContainer)
+}
+
+var fileDescriptorContainer = []byte{
+	// 427 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0x5d, 0x8b, 0xd4, 0x30,
+	0x14, 0xdd, 0x74, 0xd6, 0x19, 0x4c, 0x05, 0x25, 0x0c, 0x52, 0x07, 0xac, 0xe3, 0x3c, 0x8d, 0x2f,
+	0x09, 0x3b, 0x82, 0xe8, 0x0a, 0xa2, 0xbb, 0x2b, 0x22, 0x2a, 0x48, 0xc0, 0x17, 0x11, 0x24, 0x9d,
+	0xde, 0xed, 0x04, 0xdb, 0xa4, 0xb4, 0x69, 0xa1, 0x6f, 0xfe, 0x14, 0x7f, 0xce, 0x3e, 0xfa, 0xe8,
+	0x93, 0xec, 0xf6, 0x1f, 0x08, 0xfe, 0x00, 0x69, 0x32, 0xdd, 0x2d, 0x82, 0x9f, 0x6f, 0xe7, 0xe6,
+	0x9e, 0x73, 0xef, 0x39, 0xb7, 0xc5, 0xcf, 0x12, 0x69, 0x36, 0x55, 0x44, 0xd7, 0x3a, 0x63, 0x6b,
+	0xad, 0x8c, 0x90, 0x0a, 0x8a, 0x78, 0x08, 0x45, 0x2e, 0x59, 0x09, 0x45, 0x2d, 0xd7, 0x50, 0x32,
+	0xa8, 0x41, 0x99, 0x92, 0xd5, 0x7b, 0x17, 0x0c, 0x9a, 0x17, 0xda, 0x68, 0x72, 0xf3, 0x42, 0x42,
+	0x7b, 0x3a, 0x75, 0x74, 0x5a, 0xef, 0xcd, 0xa6, 0x89, 0x4e, 0xb4, 0x65, 0xb2, 0x0e, 0x39, 0xd1,
+	0xec, 0x46, 0xa2, 0x75, 0x92, 0x02, 0xb3, 0x55, 0x54, 0x1d, 0x33, 0xa1, 0x9a, 0x6d, 0xeb, 0xf1,
+	0x1f, 0x8d, 0x9d, 0x8b, 0xf2, 0xb4, 0x4a, 0xa4, 0x62, 0xc7, 0x12, 0xd2, 0x38, 0x17, 0x66, 0xe3,
+	0x26, 0x2c, 0x4e, 0x11, 0xbe, 0x7a, 0xd8, 0xd3, 0x0f, 0x0b, 0x10, 0x06, 0xc8, 0x75, 0xec, 0xc9,
+	0x38, 0x40, 0x73, 0xb4, 0xbc, 0x7c, 0x30, 0x6e, 0xbf, 0xde, 0xf2, 0x9e, 0x1f, 0x71, 0x4f, 0xc6,
+	0x64, 0x8a, 0x2f, 0xc9, 0x4c, 0x24, 0x10, 0x78, 0x5d, 0x8b, 0xbb, 0x82, 0xbc, 0xc6, 0x93, 0xa2,
+	0x52, 0x46, 0x66, 0x10, 0x8c, 0xe6, 0x68, 0xe9, 0xaf, 0xee, 0xd1, 0xdf, 0xa6, 0xa4, 0x3f, 0xad,
+	0xa3, 0xdc, 0xa9, 0x79, 0x3f, 0x66, 0xf6, 0x0a, 0x4f, 0xb6, 0x6f, 0x84, 0xe0, 0x5d, 0x25, 0x32,
+	0x70, 0x66, 0xb8, 0xc5, 0x84, 0xe2, 0x89, 0xce, 0x8d, 0xd4, 0xaa, 0xb4, 0x46, 0xfc, 0xd5, 0x94,
+	0xba, 0x0b, 0xd1, 0x3e, 0x2c, 0x7d, 0xa2, 0x1a, 0xde, 0x93, 0x16, 0xdf, 0x86, 0x11, 0xdf, 0xe4,
+	0xf1, 0xbf, 0x47, 0xe4, 0x78, 0x9c, 0x8a, 0x08, 0xd2, 0x32, 0x18, 0xcd, 0x47, 0x4b, 0x7f, 0xb5,
+	0xff, 0xb7, 0x09, 0xdd, 0x36, 0xfa, 0xd2, 0x8a, 0x9f, 0x2a, 0x53, 0x34, 0x7c, 0x3b, 0x89, 0xdc,
+	0xc6, 0x57, 0x4a, 0x25, 0xf2, 0x72, 0xa3, 0xcd, 0xfb, 0x0f, 0xd0, 0x04, 0xbb, 0x76, 0xa1, 0xdf,
+	0xbf, 0xbd, 0x80, 0x66, 0xf6, 0x00, 0xfb, 0x03, 0x25, 0xb9, 0x86, 0x47, 0x1d, 0xd1, 0x9d, 0xa2,
+	0x83, 0x9d, 0xdb, 0x5a, 0xa4, 0xd5, 0xb9, 0x5b, 0x5b, 0xec, 0x7b, 0xf7, 0xd1, 0xe2, 0xce, 0x20,
+	0xf2, 0x11, 0xa4, 0xf0, 0xeb, 0xc8, 0x07, 0xef, 0x4e, 0xce, 0xc2, 0x9d, 0x2f, 0x67, 0xe1, 0xce,
+	0xc7, 0x36, 0x44, 0x27, 0x6d, 0x88, 0x3e, 0xb7, 0x21, 0x3a, 0x6d, 0x43, 0xf4, 0xe9, 0x7b, 0x88,
+	0xde, 0x3e, 0xfa, 0xcf, 0x5f, 0xff, 0xa1, 0x43, 0xd1, 0xd8, 0x7e, 0x93, 0xbb, 0x3f, 0x02, 0x00,
+	0x00, 0xff, 0xff, 0x26, 0x0d, 0x55, 0x1c, 0x43, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/container.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/container.proto
new file mode 100644
index 0000000..ca8acb8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/container.proto
@@ -0,0 +1,31 @@
+syntax = "proto3";
+
+package containerd.services.events.v1;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+option (containerd.plugin.fieldpath_all) = true;
+
+message ContainerCreate {
+	string id = 1;
+	string image = 2;
+	message Runtime {
+		string name = 1;
+		google.protobuf.Any options = 2;
+	}
+	Runtime runtime = 3;
+}
+
+message ContainerUpdate {
+	string id = 1;
+	string image = 2;
+	map<string, string> labels  = 3;
+	string snapshot_key = 4;
+}
+
+message ContainerDelete {
+	string id = 1;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/content.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/content.pb.go
new file mode 100644
index 0000000..6fc5d61
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/content.pb.go
@@ -0,0 +1,348 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/events/v1/content.proto
+// DO NOT EDIT!
+
+package events
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/containerd/containerd/protobuf/plugin"
+
+import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ContentDelete struct {
+	Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+}
+
+func (m *ContentDelete) Reset()                    { *m = ContentDelete{} }
+func (*ContentDelete) ProtoMessage()               {}
+func (*ContentDelete) Descriptor() ([]byte, []int) { return fileDescriptorContent, []int{0} }
+
+func init() {
+	proto.RegisterType((*ContentDelete)(nil), "containerd.services.events.v1.ContentDelete")
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *ContentDelete) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "digest":
+		return string(m.Digest), len(m.Digest) > 0
+	}
+	return "", false
+}
+func (m *ContentDelete) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ContentDelete) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Digest) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintContent(dAtA, i, uint64(len(m.Digest)))
+		i += copy(dAtA[i:], m.Digest)
+	}
+	return i, nil
+}
+
+func encodeFixed64Content(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Content(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintContent(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *ContentDelete) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Digest)
+	if l > 0 {
+		n += 1 + l + sovContent(uint64(l))
+	}
+	return n
+}
+
+func sovContent(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozContent(x uint64) (n int) {
+	return sovContent(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ContentDelete) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ContentDelete{`,
+		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringContent(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *ContentDelete) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ContentDelete: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ContentDelete: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthContent
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipContent(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthContent
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipContent(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowContent
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowContent
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthContent
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowContent
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipContent(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthContent = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowContent   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/content.proto", fileDescriptorContent)
+}
+
+var fileDescriptorContent = []byte{
+	// 242 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4d, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
+	0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0x82, 0x55, 0xa4, 0xe6, 0x95, 0xe8, 0x15,
+	0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x34, 0xe8, 0xc1, 0x14, 0xeb, 0x41, 0x14, 0xeb, 0x95,
+	0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83, 0x58, 0x10, 0x4d, 0x52, 0x0e,
+	0x04, 0xed, 0x06, 0xab, 0x4b, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0xd3, 0x4f,
+	0xcb, 0x4c, 0xcd, 0x49, 0x29, 0x48, 0x2c, 0xc9, 0x80, 0x98, 0xa0, 0x14, 0xcd, 0xc5, 0xeb, 0x0c,
+	0x71, 0x87, 0x4b, 0x6a, 0x4e, 0x6a, 0x49, 0xaa, 0x90, 0x17, 0x17, 0x5b, 0x4a, 0x66, 0x7a, 0x6a,
+	0x71, 0x89, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xd1, 0x89, 0x7b, 0xf2, 0x0c, 0xb7, 0xee,
+	0xc9, 0x6b, 0x21, 0x59, 0x95, 0x5f, 0x90, 0x9a, 0x07, 0xb7, 0xa3, 0x58, 0x3f, 0x3d, 0x5f, 0x17,
+	0xa2, 0x45, 0xcf, 0x05, 0x4c, 0x05, 0x41, 0x4d, 0x70, 0x8a, 0x39, 0xf1, 0x50, 0x8e, 0xe1, 0xc6,
+	0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8,
+	0xe0, 0x91, 0x1c, 0xe3, 0x82, 0x2f, 0x72, 0x8c, 0x51, 0x76, 0x64, 0x06, 0x9c, 0x35, 0x84, 0x95,
+	0xc4, 0x06, 0xf6, 0x81, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x51, 0xce, 0xec, 0x89, 0x81, 0x01,
+	0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/content.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/content.proto
new file mode 100644
index 0000000..95358f5
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/content.proto
@@ -0,0 +1,13 @@
+syntax = "proto3";
+
+package containerd.services.events.v1;
+
+import "gogoproto/gogo.proto";
+import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+option (containerd.plugin.fieldpath_all) = true;
+
+message ContentDelete {
+	string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
new file mode 100644
index 0000000..e894064
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.pb.go
@@ -0,0 +1,1185 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/events/v1/events.proto
+// DO NOT EDIT!
+
+package events
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/containerd/containerd/protobuf/plugin"
+import _ "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/gogo/protobuf/types"
+import google_protobuf2 "github.com/golang/protobuf/ptypes/empty"
+import _ "github.com/gogo/protobuf/types"
+
+import time "time"
+
+import github_com_containerd_typeurl "github.com/containerd/typeurl"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+type PublishRequest struct {
+	Topic string                `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"`
+	Event *google_protobuf1.Any `protobuf:"bytes,2,opt,name=event" json:"event,omitempty"`
+}
+
+func (m *PublishRequest) Reset()                    { *m = PublishRequest{} }
+func (*PublishRequest) ProtoMessage()               {}
+func (*PublishRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{0} }
+
+type ForwardRequest struct {
+	Envelope *Envelope `protobuf:"bytes,1,opt,name=envelope" json:"envelope,omitempty"`
+}
+
+func (m *ForwardRequest) Reset()                    { *m = ForwardRequest{} }
+func (*ForwardRequest) ProtoMessage()               {}
+func (*ForwardRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{1} }
+
+type SubscribeRequest struct {
+	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+}
+
+func (m *SubscribeRequest) Reset()                    { *m = SubscribeRequest{} }
+func (*SubscribeRequest) ProtoMessage()               {}
+func (*SubscribeRequest) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{2} }
+
+type Envelope struct {
+	Timestamp time.Time             `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"`
+	Namespace string                `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
+	Topic     string                `protobuf:"bytes,3,opt,name=topic,proto3" json:"topic,omitempty"`
+	Event     *google_protobuf1.Any `protobuf:"bytes,4,opt,name=event" json:"event,omitempty"`
+}
+
+func (m *Envelope) Reset()                    { *m = Envelope{} }
+func (*Envelope) ProtoMessage()               {}
+func (*Envelope) Descriptor() ([]byte, []int) { return fileDescriptorEvents, []int{3} }
+
+func init() {
+	proto.RegisterType((*PublishRequest)(nil), "containerd.services.events.v1.PublishRequest")
+	proto.RegisterType((*ForwardRequest)(nil), "containerd.services.events.v1.ForwardRequest")
+	proto.RegisterType((*SubscribeRequest)(nil), "containerd.services.events.v1.SubscribeRequest")
+	proto.RegisterType((*Envelope)(nil), "containerd.services.events.v1.Envelope")
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *Envelope) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	// unhandled: timestamp
+	case "namespace":
+		return string(m.Namespace), len(m.Namespace) > 0
+	case "topic":
+		return string(m.Topic), len(m.Topic) > 0
+	case "event":
+		decoded, err := github_com_containerd_typeurl.UnmarshalAny(m.Event)
+		if err != nil {
+			return "", false
+		}
+
+		adaptor, ok := decoded.(interface {
+			Field([]string) (string, bool)
+		})
+		if !ok {
+			return "", false
+		}
+		return adaptor.Field(fieldpath[1:])
+	}
+	return "", false
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Events service
+
+type EventsClient interface {
+	// Publish an event to a topic.
+	//
+	// The event will be packed into a timestamp envelope with the namespace
+	// introspected from the context. The envelope will then be dispatched.
+	Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
+	// Forward sends an event that has already been packaged into an envelope
+	// with a timestamp and namespace.
+	//
+	// This is useful if earlier timestamping is required or when fowarding on
+	// behalf of another component, namespace or publisher.
+	Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error)
+	// Subscribe to a stream of events, possibly returning only that match any
+	// of the provided filters.
+	//
+	// Unlike many other methods in containerd, subscribers will get messages
+	// from all namespaces unless otherwise specified. If this is not desired,
+	// a filter can be provided in the format 'namespace==<namespace>' to
+	// restrict the received events.
+	Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error)
+}
+
+type eventsClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewEventsClient(cc *grpc.ClientConn) EventsClient {
+	return &eventsClient{cc}
+}
+
+func (c *eventsClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
+	out := new(google_protobuf2.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.events.v1.Events/Publish", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *eventsClient) Forward(ctx context.Context, in *ForwardRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) {
+	out := new(google_protobuf2.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.events.v1.Events/Forward", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *eventsClient) Subscribe(ctx context.Context, in *SubscribeRequest, opts ...grpc.CallOption) (Events_SubscribeClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_Events_serviceDesc.Streams[0], c.cc, "/containerd.services.events.v1.Events/Subscribe", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &eventsSubscribeClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Events_SubscribeClient interface {
+	Recv() (*Envelope, error)
+	grpc.ClientStream
+}
+
+type eventsSubscribeClient struct {
+	grpc.ClientStream
+}
+
+func (x *eventsSubscribeClient) Recv() (*Envelope, error) {
+	m := new(Envelope)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// Server API for Events service
+
+type EventsServer interface {
+	// Publish an event to a topic.
+	//
+	// The event will be packed into a timestamp envelope with the namespace
+	// introspected from the context. The envelope will then be dispatched.
+	Publish(context.Context, *PublishRequest) (*google_protobuf2.Empty, error)
+	// Forward sends an event that has already been packaged into an envelope
+	// with a timestamp and namespace.
+	//
+	// This is useful if earlier timestamping is required or when fowarding on
+	// behalf of another component, namespace or publisher.
+	Forward(context.Context, *ForwardRequest) (*google_protobuf2.Empty, error)
+	// Subscribe to a stream of events, possibly returning only that match any
+	// of the provided filters.
+	//
+	// Unlike many other methods in containerd, subscribers will get messages
+	// from all namespaces unless otherwise specified. If this is not desired,
+	// a filter can be provided in the format 'namespace==<namespace>' to
+	// restrict the received events.
+	Subscribe(*SubscribeRequest, Events_SubscribeServer) error
+}
+
+func RegisterEventsServer(s *grpc.Server, srv EventsServer) {
+	s.RegisterService(&_Events_serviceDesc, srv)
+}
+
+func _Events_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PublishRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(EventsServer).Publish(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.events.v1.Events/Publish",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(EventsServer).Publish(ctx, req.(*PublishRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Events_Forward_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ForwardRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(EventsServer).Forward(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.events.v1.Events/Forward",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(EventsServer).Forward(ctx, req.(*ForwardRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Events_Subscribe_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(SubscribeRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(EventsServer).Subscribe(m, &eventsSubscribeServer{stream})
+}
+
+type Events_SubscribeServer interface {
+	Send(*Envelope) error
+	grpc.ServerStream
+}
+
+type eventsSubscribeServer struct {
+	grpc.ServerStream
+}
+
+func (x *eventsSubscribeServer) Send(m *Envelope) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+var _Events_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "containerd.services.events.v1.Events",
+	HandlerType: (*EventsServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Publish",
+			Handler:    _Events_Publish_Handler,
+		},
+		{
+			MethodName: "Forward",
+			Handler:    _Events_Forward_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Subscribe",
+			Handler:       _Events_Subscribe_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "github.com/containerd/containerd/api/services/events/v1/events.proto",
+}
+
+func (m *PublishRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PublishRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Topic) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic)))
+		i += copy(dAtA[i:], m.Topic)
+	}
+	if m.Event != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintEvents(dAtA, i, uint64(m.Event.Size()))
+		n1, err := m.Event.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n1
+	}
+	return i, nil
+}
+
+func (m *ForwardRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ForwardRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Envelope != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintEvents(dAtA, i, uint64(m.Envelope.Size()))
+		n2, err := m.Envelope.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n2
+	}
+	return i, nil
+}
+
+func (m *SubscribeRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SubscribeRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			dAtA[i] = 0xa
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	return i, nil
+}
+
+func (m *Envelope) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Envelope) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintEvents(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
+	n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n3
+	if len(m.Namespace) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintEvents(dAtA, i, uint64(len(m.Namespace)))
+		i += copy(dAtA[i:], m.Namespace)
+	}
+	if len(m.Topic) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintEvents(dAtA, i, uint64(len(m.Topic)))
+		i += copy(dAtA[i:], m.Topic)
+	}
+	if m.Event != nil {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintEvents(dAtA, i, uint64(m.Event.Size()))
+		n4, err := m.Event.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n4
+	}
+	return i, nil
+}
+
+func encodeFixed64Events(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Events(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintEvents(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *PublishRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Topic)
+	if l > 0 {
+		n += 1 + l + sovEvents(uint64(l))
+	}
+	if m.Event != nil {
+		l = m.Event.Size()
+		n += 1 + l + sovEvents(uint64(l))
+	}
+	return n
+}
+
+func (m *ForwardRequest) Size() (n int) {
+	var l int
+	_ = l
+	if m.Envelope != nil {
+		l = m.Envelope.Size()
+		n += 1 + l + sovEvents(uint64(l))
+	}
+	return n
+}
+
+func (m *SubscribeRequest) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			l = len(s)
+			n += 1 + l + sovEvents(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *Envelope) Size() (n int) {
+	var l int
+	_ = l
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
+	n += 1 + l + sovEvents(uint64(l))
+	l = len(m.Namespace)
+	if l > 0 {
+		n += 1 + l + sovEvents(uint64(l))
+	}
+	l = len(m.Topic)
+	if l > 0 {
+		n += 1 + l + sovEvents(uint64(l))
+	}
+	if m.Event != nil {
+		l = m.Event.Size()
+		n += 1 + l + sovEvents(uint64(l))
+	}
+	return n
+}
+
+func sovEvents(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozEvents(x uint64) (n int) {
+	return sovEvents(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *PublishRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PublishRequest{`,
+		`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
+		`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "google_protobuf1.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ForwardRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ForwardRequest{`,
+		`Envelope:` + strings.Replace(fmt.Sprintf("%v", this.Envelope), "Envelope", "Envelope", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SubscribeRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SubscribeRequest{`,
+		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Envelope) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Envelope{`,
+		`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`,
+		`Topic:` + fmt.Sprintf("%v", this.Topic) + `,`,
+		`Event:` + strings.Replace(fmt.Sprintf("%v", this.Event), "Any", "google_protobuf1.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringEvents(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *PublishRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowEvents
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PublishRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PublishRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEvents
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEvents
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Topic = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEvents
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthEvents
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Event == nil {
+				m.Event = &google_protobuf1.Any{}
+			}
+			if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipEvents(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthEvents
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ForwardRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowEvents
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ForwardRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ForwardRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Envelope", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEvents
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthEvents
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Envelope == nil {
+				m.Envelope = &Envelope{}
+			}
+			if err := m.Envelope.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipEvents(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthEvents
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SubscribeRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowEvents
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SubscribeRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SubscribeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEvents
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEvents
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipEvents(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthEvents
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Envelope) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowEvents
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Envelope: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Envelope: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEvents
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthEvents
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEvents
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEvents
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespace = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Topic", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEvents
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEvents
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Topic = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Event", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEvents
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthEvents
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Event == nil {
+				m.Event = &google_protobuf1.Any{}
+			}
+			if err := m.Event.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipEvents(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthEvents
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipEvents(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowEvents
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowEvents
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowEvents
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthEvents
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowEvents
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipEvents(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthEvents = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowEvents   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/events.proto", fileDescriptorEvents)
+}
+
+var fileDescriptorEvents = []byte{
+	// 462 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x93, 0xcd, 0x8e, 0xd3, 0x30,
+	0x14, 0x85, 0xeb, 0xf9, 0x6d, 0x3c, 0xd2, 0x08, 0x45, 0x15, 0x2a, 0x01, 0xd2, 0xaa, 0x1b, 0x2a,
+	0x04, 0x0e, 0x53, 0x76, 0x20, 0x21, 0x28, 0x94, 0xf5, 0x28, 0x80, 0x84, 0xd8, 0x25, 0xe9, 0x6d,
+	0x6a, 0x29, 0xb1, 0x4d, 0xec, 0x04, 0xcd, 0x6e, 0x1e, 0x81, 0x0d, 0x6f, 0xc2, 0x86, 0x37, 0xe8,
+	0x92, 0x25, 0x2b, 0x60, 0xfa, 0x24, 0xa8, 0x89, 0xdd, 0x30, 0x2d, 0x10, 0x34, 0xbb, 0x6b, 0xdf,
+	0xe3, 0xcf, 0xb9, 0xe7, 0x38, 0xf8, 0x45, 0x4c, 0xd5, 0x3c, 0x0f, 0x49, 0xc4, 0x53, 0x2f, 0xe2,
+	0x4c, 0x05, 0x94, 0x41, 0x36, 0xfd, 0xbd, 0x0c, 0x04, 0xf5, 0x24, 0x64, 0x05, 0x8d, 0x40, 0x7a,
+	0x50, 0x00, 0x53, 0xd2, 0x2b, 0x4e, 0x74, 0x45, 0x44, 0xc6, 0x15, 0xb7, 0x6f, 0xd7, 0x7a, 0x62,
+	0xb4, 0x44, 0x2b, 0x8a, 0x13, 0xe7, 0x69, 0xe3, 0x25, 0x25, 0x26, 0xcc, 0x67, 0x9e, 0x48, 0xf2,
+	0x98, 0x32, 0x6f, 0x46, 0x21, 0x99, 0x8a, 0x40, 0xcd, 0xab, 0x0b, 0x9c, 0x4e, 0xcc, 0x63, 0x5e,
+	0x96, 0xde, 0xaa, 0xd2, 0xbb, 0x37, 0x62, 0xce, 0xe3, 0x04, 0xea, 0xd3, 0x01, 0x3b, 0xd3, 0xad,
+	0x9b, 0x9b, 0x2d, 0x48, 0x85, 0x32, 0xcd, 0xde, 0x66, 0x53, 0xd1, 0x14, 0xa4, 0x0a, 0x52, 0x51,
+	0x09, 0x06, 0x3e, 0x3e, 0x3e, 0xcd, 0xc3, 0x84, 0xca, 0xb9, 0x0f, 0xef, 0x73, 0x90, 0xca, 0xee,
+	0xe0, 0x7d, 0xc5, 0x05, 0x8d, 0xba, 0xa8, 0x8f, 0x86, 0x96, 0x5f, 0x2d, 0xec, 0xbb, 0x78, 0xbf,
+	0x9c, 0xb2, 0xbb, 0xd3, 0x47, 0xc3, 0xa3, 0x51, 0x87, 0x54, 0x60, 0x62, 0xc0, 0xe4, 0x19, 0x3b,
+	0xf3, 0x2b, 0xc9, 0xe0, 0x0d, 0x3e, 0x7e, 0xc9, 0xb3, 0x0f, 0x41, 0x36, 0x35, 0xcc, 0xe7, 0xb8,
+	0x0d, 0xac, 0x80, 0x84, 0x0b, 0x28, 0xb1, 0x47, 0xa3, 0x3b, 0xe4, 0x9f, 0x46, 0x92, 0x89, 0x96,
+	0xfb, 0xeb, 0x83, 0x83, 0x7b, 0xf8, 0xda, 0xab, 0x3c, 0x94, 0x51, 0x46, 0x43, 0x30, 0xe0, 0x2e,
+	0x3e, 0x9c, 0xd1, 0x44, 0x41, 0x26, 0xbb, 0xa8, 0xbf, 0x3b, 0xb4, 0x7c, 0xb3, 0x1c, 0x7c, 0x46,
+	0xb8, 0x6d, 0x20, 0xf6, 0x18, 0x5b, 0xeb, 0xc1, 0xf5, 0x07, 0x38, 0x5b, 0x13, 0xbc, 0x36, 0x8a,
+	0x71, 0x7b, 0xf1, 0xbd, 0xd7, 0xfa, 0xf8, 0xa3, 0x87, 0xfc, 0xfa, 0x98, 0x7d, 0x0b, 0x5b, 0x2c,
+	0x48, 0x41, 0x8a, 0x20, 0x82, 0xd2, 0x05, 0xcb, 0xaf, 0x37, 0x6a, 0xd7, 0x76, 0xff, 0xe8, 0xda,
+	0x5e, 0xa3, 0x6b, 0x8f, 0xf6, 0xce, 0xbf, 0xf4, 0xd0, 0xe8, 0xd3, 0x0e, 0x3e, 0x98, 0x94, 0x2e,
+	0xd8, 0xa7, 0xf8, 0x50, 0x47, 0x63, 0xdf, 0x6f, 0x70, 0xeb, 0x72, 0x84, 0xce, 0xf5, 0xad, 0x7b,
+	0x26, 0xab, 0x37, 0xb1, 0x22, 0xea, 0x60, 0x1a, 0x89, 0x97, 0x03, 0xfc, 0x2b, 0x31, 0xc6, 0xd6,
+	0x3a, 0x13, 0xdb, 0x6b, 0x60, 0x6e, 0xa6, 0xe7, 0xfc, 0xef, 0x23, 0x78, 0x80, 0xc6, 0x6f, 0x17,
+	0x17, 0x6e, 0xeb, 0xdb, 0x85, 0xdb, 0x3a, 0x5f, 0xba, 0x68, 0xb1, 0x74, 0xd1, 0xd7, 0xa5, 0x8b,
+	0x7e, 0x2e, 0x5d, 0xf4, 0xee, 0xc9, 0x15, 0xff, 0xeb, 0xc7, 0x55, 0x15, 0x1e, 0x94, 0x23, 0x3d,
+	0xfc, 0x15, 0x00, 0x00, 0xff, 0xff, 0x1c, 0x38, 0x37, 0x72, 0x20, 0x04, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto
new file mode 100644
index 0000000..a20a1e8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/events.proto
@@ -0,0 +1,56 @@
+syntax = "proto3";
+
+package containerd.services.events.v1;
+
+import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+
+service Events {
+	// Publish an event to a topic.
+	//
+	// The event will be packed into a timestamp envelope with the namespace
+	// introspected from the context. The envelope will then be dispatched.
+	rpc Publish(PublishRequest) returns (google.protobuf.Empty);
+
+	// Forward sends an event that has already been packaged into an envelope
+	// with a timestamp and namespace.
+	//
+	// This is useful if earlier timestamping is required or when fowarding on
+	// behalf of another component, namespace or publisher.
+	rpc Forward(ForwardRequest) returns (google.protobuf.Empty);
+
+	// Subscribe to a stream of events, possibly returning only that match any
+	// of the provided filters.
+	//
+	// Unlike many other methods in containerd, subscribers will get messages
+	// from all namespaces unless otherwise specified. If this is not desired,
+	// a filter can be provided in the format 'namespace==<namespace>' to
+	// restrict the received events.
+	rpc Subscribe(SubscribeRequest) returns (stream Envelope);
+}
+
+message PublishRequest {
+	string topic = 1;
+	google.protobuf.Any event = 2;
+}
+
+message ForwardRequest {
+	Envelope envelope = 1;
+}
+
+message SubscribeRequest {
+	repeated string filters = 1;
+}
+
+message Envelope {
+	option (containerd.plugin.fieldpath) = true;
+	google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	string namespace = 2;
+	string topic = 3;
+	google.protobuf.Any event = 4;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/image.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/image.pb.go
new file mode 100644
index 0000000..0349e7c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/image.pb.go
@@ -0,0 +1,963 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/events/v1/image.proto
+// DO NOT EDIT!
+
+package events
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/containerd/containerd/protobuf/plugin"
+
+import strings "strings"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ImageCreate struct {
+	Name   string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *ImageCreate) Reset()                    { *m = ImageCreate{} }
+func (*ImageCreate) ProtoMessage()               {}
+func (*ImageCreate) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{0} }
+
+type ImageUpdate struct {
+	Name   string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *ImageUpdate) Reset()                    { *m = ImageUpdate{} }
+func (*ImageUpdate) ProtoMessage()               {}
+func (*ImageUpdate) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{1} }
+
+type ImageDelete struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *ImageDelete) Reset()                    { *m = ImageDelete{} }
+func (*ImageDelete) ProtoMessage()               {}
+func (*ImageDelete) Descriptor() ([]byte, []int) { return fileDescriptorImage, []int{2} }
+
+func init() {
+	proto.RegisterType((*ImageCreate)(nil), "containerd.services.images.v1.ImageCreate")
+	proto.RegisterType((*ImageUpdate)(nil), "containerd.services.images.v1.ImageUpdate")
+	proto.RegisterType((*ImageDelete)(nil), "containerd.services.images.v1.ImageDelete")
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *ImageCreate) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "name":
+		return string(m.Name), len(m.Name) > 0
+	case "labels":
+		// Labels fields have been special-cased by name. If this breaks,
+		// add better special casing to fieldpath plugin.
+		if len(m.Labels) == 0 {
+			return "", false
+		}
+		value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
+		return value, ok
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *ImageUpdate) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "name":
+		return string(m.Name), len(m.Name) > 0
+	case "labels":
+		// Labels fields have been special-cased by name. If this breaks,
+		// add better special casing to fieldpath plugin.
+		if len(m.Labels) == 0 {
+			return "", false
+		}
+		value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
+		return value, ok
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *ImageDelete) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "name":
+		return string(m.Name), len(m.Name) > 0
+	}
+	return "", false
+}
+func (m *ImageCreate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageCreate) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x12
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
+			i = encodeVarintImage(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintImage(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintImage(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	return i, nil
+}
+
+func (m *ImageUpdate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageUpdate) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x12
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
+			i = encodeVarintImage(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintImage(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintImage(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	return i, nil
+}
+
+func (m *ImageDelete) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ImageDelete) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintImage(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	return i, nil
+}
+
+func encodeFixed64Image(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Image(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintImage(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *ImageCreate) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovImage(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
+			n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *ImageUpdate) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovImage(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovImage(uint64(len(k))) + 1 + len(v) + sovImage(uint64(len(v)))
+			n += mapEntrySize + 1 + sovImage(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *ImageDelete) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovImage(uint64(l))
+	}
+	return n
+}
+
+func sovImage(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozImage(x uint64) (n int) {
+	return sovImage(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *ImageCreate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&ImageCreate{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ImageUpdate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&ImageUpdate{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ImageDelete) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ImageDelete{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringImage(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *ImageCreate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImage
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageCreate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageCreate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImage
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthImage
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImage
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImage
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImage
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImage
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthImage
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowImage
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowImage
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthImage
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImage(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImage
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageUpdate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImage
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageUpdate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImage
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthImage
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImage
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImage
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImage
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImage
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthImage
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowImage
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowImage
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthImage
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImage(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImage
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ImageDelete) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImage
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ImageDelete: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ImageDelete: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImage
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthImage
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImage(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImage
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipImage(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowImage
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowImage
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowImage
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthImage
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowImage
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipImage(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthImage = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowImage   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/image.proto", fileDescriptorImage)
+}
+
+var fileDescriptorImage = []byte{
+	// 296 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4e, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
+	0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0x67, 0xe6, 0x26, 0xa6, 0xa7, 0xea,
+	0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x94, 0xeb, 0xc1, 0x94, 0xea, 0x81, 0x15, 0x14,
+	0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10, 0xb4, 0x03, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e, 0x41, 0x4e,
+	0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a, 0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06, 0xc4, 0x02,
+	0xa5, 0x35, 0x8c, 0x5c, 0xdc, 0x9e, 0x20, 0xf3, 0x9c, 0x8b, 0x52, 0x13, 0x4b, 0x52, 0x85, 0x84,
+	0xb8, 0x58, 0xf2, 0x12, 0x73, 0x53, 0x25, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0x21,
+	0x3f, 0x2e, 0xb6, 0x9c, 0xc4, 0xa4, 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23,
+	0x33, 0x3d, 0xbc, 0xae, 0xd2, 0x43, 0x32, 0x4f, 0xcf, 0x07, 0xac, 0xd1, 0x35, 0xaf, 0xa4, 0xa8,
+	0x32, 0x08, 0x6a, 0x8a, 0x94, 0x25, 0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a,
+	0x25, 0xd4, 0x46, 0x10, 0x53, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09,
+	0x2c, 0x06, 0xe1, 0x58, 0x31, 0x59, 0x30, 0x22, 0x9c, 0x1b, 0x5a, 0x90, 0x42, 0x55, 0xe7, 0x42,
+	0xcc, 0xa3, 0xb6, 0x73, 0x15, 0xa1, 0xae, 0x75, 0x49, 0xcd, 0x49, 0xc5, 0xee, 0x5a, 0xa7, 0x98,
+	0x13, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0x68, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c,
+	0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x2e, 0xf8, 0x22, 0xc7, 0x18, 0x65, 0x47,
+	0x66, 0x22, 0xb2, 0x86, 0xb0, 0x92, 0xd8, 0xc0, 0xb1, 0x6c, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff,
+	0x44, 0x99, 0x59, 0x31, 0x8d, 0x02, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/image.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/image.proto
new file mode 100644
index 0000000..cbab0bb
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/image.proto
@@ -0,0 +1,22 @@
+syntax = "proto3";
+
+package containerd.services.images.v1;
+
+import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+option (containerd.plugin.fieldpath_all) = true;
+
+message ImageCreate {
+	string name = 1;
+	map<string, string> labels = 2;
+}
+
+message ImageUpdate {
+	string name = 1;
+	map<string, string> labels = 2;
+}
+
+message ImageDelete {
+	string name = 1;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.pb.go
new file mode 100644
index 0000000..c2aabe4
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.pb.go
@@ -0,0 +1,965 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/events/v1/namespace.proto
+// DO NOT EDIT!
+
+package events
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/containerd/containerd/protobuf/plugin"
+
+import strings "strings"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type NamespaceCreate struct {
+	Name   string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *NamespaceCreate) Reset()                    { *m = NamespaceCreate{} }
+func (*NamespaceCreate) ProtoMessage()               {}
+func (*NamespaceCreate) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{0} }
+
+type NamespaceUpdate struct {
+	Name   string            `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *NamespaceUpdate) Reset()                    { *m = NamespaceUpdate{} }
+func (*NamespaceUpdate) ProtoMessage()               {}
+func (*NamespaceUpdate) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{1} }
+
+type NamespaceDelete struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *NamespaceDelete) Reset()                    { *m = NamespaceDelete{} }
+func (*NamespaceDelete) ProtoMessage()               {}
+func (*NamespaceDelete) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{2} }
+
+func init() {
+	proto.RegisterType((*NamespaceCreate)(nil), "containerd.services.events.v1.NamespaceCreate")
+	proto.RegisterType((*NamespaceUpdate)(nil), "containerd.services.events.v1.NamespaceUpdate")
+	proto.RegisterType((*NamespaceDelete)(nil), "containerd.services.events.v1.NamespaceDelete")
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *NamespaceCreate) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "name":
+		return string(m.Name), len(m.Name) > 0
+	case "labels":
+		// Labels fields have been special-cased by name. If this breaks,
+		// add better special casing to fieldpath plugin.
+		if len(m.Labels) == 0 {
+			return "", false
+		}
+		value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
+		return value, ok
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *NamespaceUpdate) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "name":
+		return string(m.Name), len(m.Name) > 0
+	case "labels":
+		// Labels fields have been special-cased by name. If this breaks,
+		// add better special casing to fieldpath plugin.
+		if len(m.Labels) == 0 {
+			return "", false
+		}
+		value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
+		return value, ok
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *NamespaceDelete) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "name":
+		return string(m.Name), len(m.Name) > 0
+	}
+	return "", false
+}
+func (m *NamespaceCreate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamespaceCreate) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x12
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
+			i = encodeVarintNamespace(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintNamespace(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	return i, nil
+}
+
+func (m *NamespaceUpdate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamespaceUpdate) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x12
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
+			i = encodeVarintNamespace(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintNamespace(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	return i, nil
+}
+
+func (m *NamespaceDelete) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *NamespaceDelete) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	return i, nil
+}
+
+func encodeFixed64Namespace(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Namespace(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *NamespaceCreate) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovNamespace(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
+			n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *NamespaceUpdate) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovNamespace(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
+			n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *NamespaceDelete) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovNamespace(uint64(l))
+	}
+	return n
+}
+
+func sovNamespace(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozNamespace(x uint64) (n int) {
+	return sovNamespace(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *NamespaceCreate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&NamespaceCreate{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NamespaceUpdate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&NamespaceUpdate{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *NamespaceDelete) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&NamespaceDelete{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringNamespace(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *NamespaceCreate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NamespaceCreate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NamespaceCreate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowNamespace
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowNamespace
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthNamespace
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NamespaceUpdate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NamespaceUpdate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NamespaceUpdate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowNamespace
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowNamespace
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthNamespace
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *NamespaceDelete) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: NamespaceDelete: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: NamespaceDelete: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipNamespace(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthNamespace
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowNamespace
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipNamespace(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowNamespace   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/namespace.proto", fileDescriptorNamespace)
+}
+
+var fileDescriptorNamespace = []byte{
+	// 310 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4f, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
+	0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0xe7, 0x25, 0xe6, 0xa6, 0x16, 0x17,
+	0x24, 0x26, 0xa7, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0xb4, 0xe8, 0xc1, 0x94,
+	0xeb, 0x41, 0x94, 0xeb, 0x95, 0x19, 0x4a, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x55, 0xea, 0x83,
+	0x58, 0x10, 0x4d, 0x52, 0x0e, 0x04, 0x6d, 0x07, 0xab, 0x4b, 0x2a, 0x4d, 0xd3, 0x2f, 0xc8, 0x29,
+	0x4d, 0xcf, 0xcc, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, 0x49, 0x29, 0x48, 0x2c, 0xc9, 0x80, 0x98, 0xa0,
+	0xb4, 0x85, 0x91, 0x8b, 0xdf, 0x0f, 0xe6, 0x14, 0xe7, 0xa2, 0xd4, 0xc4, 0x92, 0x54, 0x21, 0x21,
+	0x2e, 0x16, 0x90, 0xeb, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xc0, 0x6c, 0xa1, 0x20, 0x2e,
+	0xb6, 0x9c, 0xc4, 0xa4, 0xd4, 0x9c, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x2b, 0x3d,
+	0xbc, 0xee, 0xd5, 0x43, 0x33, 0x53, 0xcf, 0x07, 0xac, 0xd9, 0x35, 0xaf, 0xa4, 0xa8, 0x32, 0x08,
+	0x6a, 0x92, 0x94, 0x25, 0x17, 0x37, 0x92, 0xb0, 0x90, 0x00, 0x17, 0x73, 0x76, 0x6a, 0x25, 0xd4,
+	0x56, 0x10, 0x53, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, 0x34, 0x55, 0x82, 0x09, 0x2c, 0x06,
+	0xe1, 0x58, 0x31, 0x59, 0x30, 0xa2, 0x3a, 0x3b, 0xb4, 0x20, 0x85, 0xea, 0xce, 0x86, 0x98, 0x49,
+	0x6d, 0x67, 0xab, 0x22, 0xb9, 0xda, 0x25, 0x35, 0x27, 0x15, 0xbb, 0xab, 0x9d, 0x62, 0x4e, 0x3c,
+	0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17,
+	0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xb8, 0xe0, 0x8b, 0x1c, 0x63, 0x94, 0x1d, 0x99, 0x49,
+	0xce, 0x1a, 0xc2, 0x4a, 0x62, 0x03, 0xc7, 0xbc, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x58, 0x7e,
+	0x6c, 0xc6, 0xbb, 0x02, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.proto
new file mode 100644
index 0000000..2d6be2b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/namespace.proto
@@ -0,0 +1,23 @@
+syntax = "proto3";
+
+package containerd.services.events.v1;
+
+import "gogoproto/gogo.proto";
+import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+option (containerd.plugin.fieldpath_all) = true;
+
+message NamespaceCreate {
+	string name = 1;
+	map<string, string> labels  = 2;
+}
+
+message NamespaceUpdate {
+	string name = 1;
+	map<string, string> labels  = 2;
+}
+
+message NamespaceDelete {
+	string name = 1;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.pb.go
new file mode 100644
index 0000000..265d47a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.pb.go
@@ -0,0 +1,723 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/events/v1/snapshot.proto
+// DO NOT EDIT!
+
+package events
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/containerd/containerd/protobuf/plugin"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type SnapshotPrepare struct {
+	Key    string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"`
+}
+
+func (m *SnapshotPrepare) Reset()                    { *m = SnapshotPrepare{} }
+func (*SnapshotPrepare) ProtoMessage()               {}
+func (*SnapshotPrepare) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{0} }
+
+type SnapshotCommit struct {
+	Key  string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *SnapshotCommit) Reset()                    { *m = SnapshotCommit{} }
+func (*SnapshotCommit) ProtoMessage()               {}
+func (*SnapshotCommit) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{1} }
+
+type SnapshotRemove struct {
+	Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (m *SnapshotRemove) Reset()                    { *m = SnapshotRemove{} }
+func (*SnapshotRemove) ProtoMessage()               {}
+func (*SnapshotRemove) Descriptor() ([]byte, []int) { return fileDescriptorSnapshot, []int{2} }
+
+func init() {
+	proto.RegisterType((*SnapshotPrepare)(nil), "containerd.services.events.v1.SnapshotPrepare")
+	proto.RegisterType((*SnapshotCommit)(nil), "containerd.services.events.v1.SnapshotCommit")
+	proto.RegisterType((*SnapshotRemove)(nil), "containerd.services.events.v1.SnapshotRemove")
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *SnapshotPrepare) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "key":
+		return string(m.Key), len(m.Key) > 0
+	case "parent":
+		return string(m.Parent), len(m.Parent) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *SnapshotCommit) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "key":
+		return string(m.Key), len(m.Key) > 0
+	case "name":
+		return string(m.Name), len(m.Name) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *SnapshotRemove) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "key":
+		return string(m.Key), len(m.Key) > 0
+	}
+	return "", false
+}
+func (m *SnapshotPrepare) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SnapshotPrepare) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Key) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
+	}
+	if len(m.Parent) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Parent)))
+		i += copy(dAtA[i:], m.Parent)
+	}
+	return i, nil
+}
+
+func (m *SnapshotCommit) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SnapshotCommit) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Key) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
+	}
+	if len(m.Name) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	return i, nil
+}
+
+func (m *SnapshotRemove) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SnapshotRemove) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Key) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshot(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
+	}
+	return i, nil
+}
+
+func encodeFixed64Snapshot(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Snapshot(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintSnapshot(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *SnapshotPrepare) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovSnapshot(uint64(l))
+	}
+	l = len(m.Parent)
+	if l > 0 {
+		n += 1 + l + sovSnapshot(uint64(l))
+	}
+	return n
+}
+
+func (m *SnapshotCommit) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovSnapshot(uint64(l))
+	}
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovSnapshot(uint64(l))
+	}
+	return n
+}
+
+func (m *SnapshotRemove) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovSnapshot(uint64(l))
+	}
+	return n
+}
+
+func sovSnapshot(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozSnapshot(x uint64) (n int) {
+	return sovSnapshot(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *SnapshotPrepare) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SnapshotPrepare{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Parent:` + fmt.Sprintf("%v", this.Parent) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SnapshotCommit) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SnapshotCommit{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *SnapshotRemove) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&SnapshotRemove{`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringSnapshot(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *SnapshotPrepare) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshot
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotPrepare: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotPrepare: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshot
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshot
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshot
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshot
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Parent = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshot(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshot
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SnapshotCommit) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshot
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotCommit: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotCommit: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshot
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshot
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshot
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshot
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshot(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshot
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SnapshotRemove) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshot
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotRemove: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotRemove: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshot
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshot
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshot(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshot
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipSnapshot(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowSnapshot
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowSnapshot
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowSnapshot
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthSnapshot
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowSnapshot
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipSnapshot(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthSnapshot = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowSnapshot   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/snapshot.proto", fileDescriptorSnapshot)
+}
+
+var fileDescriptorSnapshot = []byte{
+	// 252 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
+	0xa7, 0x96, 0xa5, 0xe6, 0x95, 0x14, 0xeb, 0x97, 0x19, 0xea, 0x17, 0xe7, 0x25, 0x16, 0x14, 0x67,
+	0xe4, 0x97, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x22, 0x74, 0xe8, 0xc1, 0x54, 0xeb,
+	0x41, 0x54, 0xeb, 0x95, 0x19, 0x4a, 0x39, 0x10, 0xb4, 0x06, 0x6c, 0x4c, 0x52, 0x69, 0x9a, 0x7e,
+	0x41, 0x4e, 0x69, 0x7a, 0x66, 0x9e, 0x7e, 0x5a, 0x66, 0x6a, 0x4e, 0x4a, 0x41, 0x62, 0x49, 0x06,
+	0xc4, 0x02, 0x25, 0x6b, 0x2e, 0xfe, 0x60, 0xa8, 0x95, 0x01, 0x45, 0xa9, 0x05, 0x89, 0x45, 0xa9,
+	0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0xa6,
+	0x90, 0x18, 0x17, 0x1b, 0x48, 0x26, 0xaf, 0x44, 0x82, 0x09, 0x2c, 0x08, 0xe5, 0x29, 0x99, 0x71,
+	0xf1, 0xc1, 0x34, 0x3b, 0xe7, 0xe7, 0xe6, 0x66, 0x96, 0x60, 0xd1, 0x2b, 0xc4, 0xc5, 0x92, 0x97,
+	0x98, 0x9b, 0x0a, 0xd5, 0x09, 0x66, 0x2b, 0x29, 0x21, 0xf4, 0x05, 0xa5, 0xe6, 0xe6, 0x97, 0x61,
+	0xb1, 0xd3, 0x29, 0xe6, 0xc4, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x1a, 0x1e, 0xc9, 0x31,
+	0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x0b, 0xbe, 0xc8,
+	0x31, 0x46, 0xd9, 0x91, 0x19, 0xbe, 0xd6, 0x10, 0x56, 0x12, 0x1b, 0xd8, 0xf7, 0xc6, 0x80, 0x00,
+	0x00, 0x00, 0xff, 0xff, 0x3a, 0x82, 0x7a, 0xa7, 0xa8, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.proto
new file mode 100644
index 0000000..b6af0ea
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/snapshot.proto
@@ -0,0 +1,22 @@
+syntax = "proto3";
+
+package containerd.services.events.v1;
+
+import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+option (containerd.plugin.fieldpath_all) = true;
+
+message SnapshotPrepare {
+	string key = 1;
+	string parent = 2;
+}
+
+message SnapshotCommit {
+	string key = 1;
+	string name = 2;
+}
+
+message SnapshotRemove {
+	string key = 1;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/task.pb.go b/vendor/github.com/containerd/containerd/api/services/events/v1/task.pb.go
new file mode 100644
index 0000000..97faa38
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/task.pb.go
@@ -0,0 +1,2628 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/events/v1/task.proto
+// DO NOT EDIT!
+
+package events
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/gogo/protobuf/types"
+import containerd_types "github.com/containerd/containerd/api/types"
+import _ "github.com/containerd/containerd/protobuf/plugin"
+
+import time "time"
+
+import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+type TaskCreate struct {
+	ContainerID string                    `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	Bundle      string                    `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"`
+	Rootfs      []*containerd_types.Mount `protobuf:"bytes,3,rep,name=rootfs" json:"rootfs,omitempty"`
+	IO          *TaskIO                   `protobuf:"bytes,4,opt,name=io" json:"io,omitempty"`
+	Checkpoint  string                    `protobuf:"bytes,5,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
+	Pid         uint32                    `protobuf:"varint,6,opt,name=pid,proto3" json:"pid,omitempty"`
+}
+
+func (m *TaskCreate) Reset()                    { *m = TaskCreate{} }
+func (*TaskCreate) ProtoMessage()               {}
+func (*TaskCreate) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
+
+type TaskStart struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	Pid         uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+}
+
+func (m *TaskStart) Reset()                    { *m = TaskStart{} }
+func (*TaskStart) ProtoMessage()               {}
+func (*TaskStart) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{1} }
+
+type TaskDelete struct {
+	ContainerID string    `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	Pid         uint32    `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+	ExitStatus  uint32    `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt    time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+}
+
+func (m *TaskDelete) Reset()                    { *m = TaskDelete{} }
+func (*TaskDelete) ProtoMessage()               {}
+func (*TaskDelete) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{2} }
+
+type TaskIO struct {
+	Stdin    string `protobuf:"bytes,1,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout   string `protobuf:"bytes,2,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr   string `protobuf:"bytes,3,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Terminal bool   `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
+}
+
+func (m *TaskIO) Reset()                    { *m = TaskIO{} }
+func (*TaskIO) ProtoMessage()               {}
+func (*TaskIO) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{3} }
+
+type TaskExit struct {
+	ContainerID string    `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ID          string    `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+	Pid         uint32    `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
+	ExitStatus  uint32    `protobuf:"varint,4,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt    time.Time `protobuf:"bytes,5,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+}
+
+func (m *TaskExit) Reset()                    { *m = TaskExit{} }
+func (*TaskExit) ProtoMessage()               {}
+func (*TaskExit) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{4} }
+
+type TaskOOM struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+}
+
+func (m *TaskOOM) Reset()                    { *m = TaskOOM{} }
+func (*TaskOOM) ProtoMessage()               {}
+func (*TaskOOM) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{5} }
+
+type TaskExecAdded struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+}
+
+func (m *TaskExecAdded) Reset()                    { *m = TaskExecAdded{} }
+func (*TaskExecAdded) ProtoMessage()               {}
+func (*TaskExecAdded) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{6} }
+
+type TaskExecStarted struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	Pid         uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
+}
+
+func (m *TaskExecStarted) Reset()                    { *m = TaskExecStarted{} }
+func (*TaskExecStarted) ProtoMessage()               {}
+func (*TaskExecStarted) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{7} }
+
+type TaskPaused struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+}
+
+func (m *TaskPaused) Reset()                    { *m = TaskPaused{} }
+func (*TaskPaused) ProtoMessage()               {}
+func (*TaskPaused) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{8} }
+
+type TaskResumed struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+}
+
+func (m *TaskResumed) Reset()                    { *m = TaskResumed{} }
+func (*TaskResumed) ProtoMessage()               {}
+func (*TaskResumed) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{9} }
+
+type TaskCheckpointed struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	Checkpoint  string `protobuf:"bytes,2,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
+}
+
+func (m *TaskCheckpointed) Reset()                    { *m = TaskCheckpointed{} }
+func (*TaskCheckpointed) ProtoMessage()               {}
+func (*TaskCheckpointed) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{10} }
+
+func init() {
+	proto.RegisterType((*TaskCreate)(nil), "containerd.services.events.v1.TaskCreate")
+	proto.RegisterType((*TaskStart)(nil), "containerd.services.events.v1.TaskStart")
+	proto.RegisterType((*TaskDelete)(nil), "containerd.services.events.v1.TaskDelete")
+	proto.RegisterType((*TaskIO)(nil), "containerd.services.events.v1.TaskIO")
+	proto.RegisterType((*TaskExit)(nil), "containerd.services.events.v1.TaskExit")
+	proto.RegisterType((*TaskOOM)(nil), "containerd.services.events.v1.TaskOOM")
+	proto.RegisterType((*TaskExecAdded)(nil), "containerd.services.events.v1.TaskExecAdded")
+	proto.RegisterType((*TaskExecStarted)(nil), "containerd.services.events.v1.TaskExecStarted")
+	proto.RegisterType((*TaskPaused)(nil), "containerd.services.events.v1.TaskPaused")
+	proto.RegisterType((*TaskResumed)(nil), "containerd.services.events.v1.TaskResumed")
+	proto.RegisterType((*TaskCheckpointed)(nil), "containerd.services.events.v1.TaskCheckpointed")
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskCreate) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	// unhandled: rootfs
+	// unhandled: pid
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	case "bundle":
+		return string(m.Bundle), len(m.Bundle) > 0
+	case "io":
+		// NOTE(stevvooe): This is probably not correct in many cases.
+		// We assume that the target message also implements the Field
+		// method, which isn't likely true in a lot of cases.
+		//
+		// If you have a broken build and have found this comment,
+		// you may be closer to a solution.
+		if m.IO == nil {
+			return "", false
+		}
+
+		return m.IO.Field(fieldpath[1:])
+	case "checkpoint":
+		return string(m.Checkpoint), len(m.Checkpoint) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskStart) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	// unhandled: pid
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskDelete) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	// unhandled: pid
+	// unhandled: exit_status
+	// unhandled: exited_at
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskIO) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "stdin":
+		return string(m.Stdin), len(m.Stdin) > 0
+	case "stdout":
+		return string(m.Stdout), len(m.Stdout) > 0
+	case "stderr":
+		return string(m.Stderr), len(m.Stderr) > 0
+	case "terminal":
+		return fmt.Sprint(m.Terminal), true
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskExit) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	// unhandled: pid
+	// unhandled: exit_status
+	// unhandled: exited_at
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	case "id":
+		return string(m.ID), len(m.ID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskOOM) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskExecAdded) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	case "exec_id":
+		return string(m.ExecID), len(m.ExecID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskExecStarted) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	// unhandled: pid
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	case "exec_id":
+		return string(m.ExecID), len(m.ExecID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskPaused) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskResumed) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	}
+	return "", false
+}
+
+// Field returns the value for the given fieldpath as a string, if defined.
+// If the value is not defined, the second value will be false.
+func (m *TaskCheckpointed) Field(fieldpath []string) (string, bool) {
+	if len(fieldpath) == 0 {
+		return "", false
+	}
+
+	switch fieldpath[0] {
+	case "container_id":
+		return string(m.ContainerID), len(m.ContainerID) > 0
+	case "checkpoint":
+		return string(m.Checkpoint), len(m.Checkpoint) > 0
+	}
+	return "", false
+}
+func (m *TaskCreate) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TaskCreate) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.Bundle) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.Bundle)))
+		i += copy(dAtA[i:], m.Bundle)
+	}
+	if len(m.Rootfs) > 0 {
+		for _, msg := range m.Rootfs {
+			dAtA[i] = 0x1a
+			i++
+			i = encodeVarintTask(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if m.IO != nil {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.IO.Size()))
+		n1, err := m.IO.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n1
+	}
+	if len(m.Checkpoint) > 0 {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.Checkpoint)))
+		i += copy(dAtA[i:], m.Checkpoint)
+	}
+	if m.Pid != 0 {
+		dAtA[i] = 0x30
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+	}
+	return i, nil
+}
+
+func (m *TaskStart) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TaskStart) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if m.Pid != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+	}
+	return i, nil
+}
+
+func (m *TaskDelete) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TaskDelete) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if m.Pid != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+	}
+	if m.ExitStatus != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus))
+	}
+	dAtA[i] = 0x22
+	i++
+	i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n2
+	return i, nil
+}
+
+func (m *TaskIO) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TaskIO) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Stdin) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin)))
+		i += copy(dAtA[i:], m.Stdin)
+	}
+	if len(m.Stdout) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout)))
+		i += copy(dAtA[i:], m.Stdout)
+	}
+	if len(m.Stderr) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr)))
+		i += copy(dAtA[i:], m.Stderr)
+	}
+	if m.Terminal {
+		dAtA[i] = 0x20
+		i++
+		if m.Terminal {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	return i, nil
+}
+
+func (m *TaskExit) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TaskExit) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.ID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if m.Pid != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+	}
+	if m.ExitStatus != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus))
+	}
+	dAtA[i] = 0x2a
+	i++
+	i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n3
+	return i, nil
+}
+
+func (m *TaskOOM) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TaskOOM) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	return i, nil
+}
+
+func (m *TaskExecAdded) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TaskExecAdded) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
+	return i, nil
+}
+
+func (m *TaskExecStarted) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TaskExecStarted) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
+	if m.Pid != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+	}
+	return i, nil
+}
+
+func (m *TaskPaused) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TaskPaused) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	return i, nil
+}
+
+func (m *TaskResumed) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TaskResumed) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	return i, nil
+}
+
+func (m *TaskCheckpointed) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TaskCheckpointed) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.Checkpoint) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.Checkpoint)))
+		i += copy(dAtA[i:], m.Checkpoint)
+	}
+	return i, nil
+}
+
+func encodeFixed64Task(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Task(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintTask(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *TaskCreate) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	l = len(m.Bundle)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	if len(m.Rootfs) > 0 {
+		for _, e := range m.Rootfs {
+			l = e.Size()
+			n += 1 + l + sovTask(uint64(l))
+		}
+	}
+	if m.IO != nil {
+		l = m.IO.Size()
+		n += 1 + l + sovTask(uint64(l))
+	}
+	l = len(m.Checkpoint)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	if m.Pid != 0 {
+		n += 1 + sovTask(uint64(m.Pid))
+	}
+	return n
+}
+
+func (m *TaskStart) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	if m.Pid != 0 {
+		n += 1 + sovTask(uint64(m.Pid))
+	}
+	return n
+}
+
+func (m *TaskDelete) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	if m.Pid != 0 {
+		n += 1 + sovTask(uint64(m.Pid))
+	}
+	if m.ExitStatus != 0 {
+		n += 1 + sovTask(uint64(m.ExitStatus))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
+	n += 1 + l + sovTask(uint64(l))
+	return n
+}
+
+func (m *TaskIO) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Stdin)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	l = len(m.Stdout)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	l = len(m.Stderr)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	if m.Terminal {
+		n += 2
+	}
+	return n
+}
+
+func (m *TaskExit) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	if m.Pid != 0 {
+		n += 1 + sovTask(uint64(m.Pid))
+	}
+	if m.ExitStatus != 0 {
+		n += 1 + sovTask(uint64(m.ExitStatus))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
+	n += 1 + l + sovTask(uint64(l))
+	return n
+}
+
+func (m *TaskOOM) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	return n
+}
+
+func (m *TaskExecAdded) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	return n
+}
+
+func (m *TaskExecStarted) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	if m.Pid != 0 {
+		n += 1 + sovTask(uint64(m.Pid))
+	}
+	return n
+}
+
+func (m *TaskPaused) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	return n
+}
+
+func (m *TaskResumed) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	return n
+}
+
+func (m *TaskCheckpointed) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	l = len(m.Checkpoint)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	return n
+}
+
+func sovTask(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozTask(x uint64) (n int) {
+	return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *TaskCreate) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskCreate{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`,
+		`Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "containerd_types.Mount", 1) + `,`,
+		`IO:` + strings.Replace(fmt.Sprintf("%v", this.IO), "TaskIO", "TaskIO", 1) + `,`,
+		`Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskStart) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskStart{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskDelete) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskDelete{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskIO) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskIO{`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
+		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
+		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskExit) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskExit{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskOOM) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskOOM{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskExecAdded) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskExecAdded{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskExecStarted) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskExecStarted{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskPaused) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskPaused{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskResumed) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskResumed{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *TaskCheckpointed) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&TaskCheckpointed{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringTask(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *TaskCreate) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskCreate: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskCreate: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Bundle = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Rootfs = append(m.Rootfs, &containerd_types.Mount{})
+			if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IO", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.IO == nil {
+				m.IO = &TaskIO{}
+			}
+			if err := m.IO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Checkpoint = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TaskStart) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskStart: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskStart: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TaskDelete) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskDelete: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskDelete: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType)
+			}
+			m.ExitStatus = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TaskIO) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskIO: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskIO: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdin = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdout = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stderr = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Terminal = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TaskExit) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskExit: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskExit: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType)
+			}
+			m.ExitStatus = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TaskOOM) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskOOM: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskOOM: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TaskExecAdded) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskExecAdded: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskExecAdded: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TaskExecStarted) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskExecStarted: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskExecStarted: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TaskPaused) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskPaused: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskPaused: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TaskResumed) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskResumed: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskResumed: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TaskCheckpointed) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TaskCheckpointed: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TaskCheckpointed: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Checkpoint = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipTask(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthTask
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowTask
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipTask(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowTask   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/events/v1/task.proto", fileDescriptorTask)
+}
+
+var fileDescriptorTask = []byte{
+	// 648 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0xc1, 0x6e, 0xd3, 0x40,
+	0x10, 0x86, 0x6b, 0xa7, 0x75, 0x93, 0x0d, 0x55, 0x2b, 0xab, 0x82, 0x28, 0x12, 0x76, 0x64, 0x84,
+	0x94, 0x93, 0xad, 0x16, 0x89, 0x0b, 0x2a, 0x6a, 0xd2, 0x70, 0xc8, 0xa1, 0x0a, 0xb8, 0x3d, 0x21,
+	0xa4, 0xc8, 0xb1, 0x27, 0xc9, 0xd2, 0xc4, 0x6b, 0x79, 0xc7, 0x51, 0x91, 0x38, 0xf0, 0x08, 0x3c,
+	0x02, 0x4f, 0xc1, 0x33, 0xf4, 0xc0, 0x81, 0x23, 0xa7, 0x40, 0xfd, 0x0c, 0x9c, 0x38, 0xa1, 0xf5,
+	0x3a, 0x6e, 0xa1, 0xa2, 0x20, 0x4b, 0xdc, 0x76, 0xc6, 0x33, 0xff, 0xcc, 0x7c, 0x3b, 0xd9, 0x90,
+	0xee, 0x84, 0xe2, 0x34, 0x19, 0xd9, 0x3e, 0x9b, 0x3b, 0x3e, 0x0b, 0xd1, 0xa3, 0x21, 0xc4, 0xc1,
+	0xf5, 0xa3, 0x17, 0x51, 0x87, 0x43, 0xbc, 0xa0, 0x3e, 0x70, 0x07, 0x16, 0x10, 0x22, 0x77, 0x16,
+	0x7b, 0x0e, 0x7a, 0xfc, 0xcc, 0x8e, 0x62, 0x86, 0x4c, 0xbf, 0x7f, 0x15, 0x6d, 0xaf, 0x22, 0x6d,
+	0x19, 0x69, 0x2f, 0xf6, 0x9a, 0xbb, 0x13, 0x36, 0x61, 0x59, 0xa4, 0x23, 0x4e, 0x32, 0xa9, 0x69,
+	0x4e, 0x18, 0x9b, 0xcc, 0xc0, 0xc9, 0xac, 0x51, 0x32, 0x76, 0x90, 0xce, 0x81, 0xa3, 0x37, 0x8f,
+	0xf2, 0x80, 0xc7, 0xff, 0xd4, 0x19, 0xbe, 0x89, 0x80, 0x3b, 0x73, 0x96, 0x84, 0x98, 0xe7, 0x1d,
+	0xfe, 0x35, 0xaf, 0x28, 0x19, 0xcd, 0x92, 0x09, 0x0d, 0x9d, 0x31, 0x85, 0x59, 0x10, 0x79, 0x38,
+	0x95, 0x0a, 0xd6, 0x0f, 0x85, 0x90, 0x53, 0x8f, 0x9f, 0x1d, 0xc5, 0xe0, 0x21, 0xe8, 0xfb, 0xe4,
+	0x4e, 0x91, 0x3c, 0xa4, 0x41, 0x43, 0x69, 0x29, 0xed, 0x5a, 0x77, 0x3b, 0x5d, 0x9a, 0xf5, 0xa3,
+	0x95, 0xbf, 0xdf, 0x73, 0xeb, 0x45, 0x50, 0x3f, 0xd0, 0xef, 0x12, 0x6d, 0x94, 0x84, 0xc1, 0x0c,
+	0x1a, 0xaa, 0x88, 0x76, 0x73, 0x4b, 0x77, 0x88, 0x16, 0x33, 0x86, 0x63, 0xde, 0xa8, 0xb4, 0x2a,
+	0xed, 0xfa, 0xfe, 0x3d, 0xfb, 0x1a, 0xbb, 0x6c, 0x16, 0xfb, 0x58, 0xcc, 0xe2, 0xe6, 0x61, 0xfa,
+	0x01, 0x51, 0x29, 0x6b, 0xac, 0xb7, 0x94, 0x76, 0x7d, 0xff, 0xa1, 0x7d, 0x2b, 0x68, 0x5b, 0xf4,
+	0xdc, 0x1f, 0x74, 0xb5, 0x74, 0x69, 0xaa, 0xfd, 0x81, 0xab, 0x52, 0xa6, 0x1b, 0x84, 0xf8, 0x53,
+	0xf0, 0xcf, 0x22, 0x46, 0x43, 0x6c, 0x6c, 0x64, 0xbd, 0x5c, 0xf3, 0xe8, 0x3b, 0xa4, 0x12, 0xd1,
+	0xa0, 0xa1, 0xb5, 0x94, 0xf6, 0x96, 0x2b, 0x8e, 0xd6, 0x0b, 0x52, 0x13, 0x3a, 0x27, 0xe8, 0xc5,
+	0x58, 0x6a, 0xf4, 0x5c, 0x52, 0xbd, 0x92, 0xfc, 0x98, 0xf3, 0xec, 0xc1, 0x0c, 0x4a, 0xf2, 0xbc,
+	0x21, 0xaa, 0x9b, 0xa4, 0x0e, 0xe7, 0x14, 0x87, 0x1c, 0x3d, 0x4c, 0x04, 0x4e, 0xf1, 0x85, 0x08,
+	0xd7, 0x49, 0xe6, 0xd1, 0x3b, 0xa4, 0x26, 0x2c, 0x08, 0x86, 0x1e, 0xe6, 0x00, 0x9b, 0xb6, 0x5c,
+	0x3a, 0x7b, 0xb5, 0x01, 0xf6, 0xe9, 0x6a, 0xe9, 0xba, 0xd5, 0x8b, 0xa5, 0xb9, 0xf6, 0xfe, 0xab,
+	0xa9, 0xb8, 0x55, 0x99, 0xd6, 0x41, 0xeb, 0x35, 0xd1, 0x24, 0x53, 0x7d, 0x97, 0x6c, 0x70, 0x0c,
+	0x68, 0x28, 0x9b, 0x75, 0xa5, 0x21, 0x6e, 0x99, 0x63, 0xc0, 0x12, 0x5c, 0xdd, 0xb2, 0xb4, 0x72,
+	0x3f, 0xc4, 0x71, 0xd6, 0x96, 0xf4, 0x43, 0x1c, 0xeb, 0x4d, 0x52, 0x45, 0x88, 0xe7, 0x34, 0xf4,
+	0x66, 0x59, 0x47, 0x55, 0xb7, 0xb0, 0xad, 0x4f, 0x0a, 0xa9, 0x8a, 0x62, 0xcf, 0xce, 0x29, 0x96,
+	0x5c, 0x39, 0x35, 0x27, 0x54, 0xcb, 0x57, 0xa0, 0xe7, 0xaa, 0xb4, 0x40, 0x57, 0xf9, 0x23, 0xba,
+	0xf5, 0xdb, 0xd1, 0x6d, 0x94, 0x42, 0x77, 0x40, 0x36, 0xc5, 0x34, 0x83, 0xc1, 0x71, 0x99, 0x61,
+	0xac, 0x29, 0xd9, 0x92, 0x30, 0xc0, 0xef, 0x04, 0x01, 0x04, 0xa5, 0x88, 0x3c, 0x20, 0x9b, 0x70,
+	0x0e, 0xfe, 0xb0, 0xc0, 0x42, 0xd2, 0xa5, 0xa9, 0x09, 0xcd, 0x7e, 0xcf, 0xd5, 0xc4, 0xa7, 0x7e,
+	0x60, 0xbd, 0x25, 0xdb, 0xab, 0x4a, 0xd9, 0xce, 0xff, 0xc7, 0x5a, 0x37, 0xaf, 0xc2, 0x3a, 0x94,
+	0xbf, 0x8c, 0xe7, 0x5e, 0xc2, 0xcb, 0x15, 0xb6, 0x3a, 0xa4, 0x2e, 0x14, 0x5c, 0xe0, 0xc9, 0xbc,
+	0xa4, 0xc4, 0x98, 0xec, 0x64, 0xcf, 0x5d, 0xf1, 0x2c, 0x94, 0x64, 0xf0, 0xeb, 0x63, 0xa3, 0xfe,
+	0xfe, 0xd8, 0x74, 0x5f, 0x5d, 0x5c, 0x1a, 0x6b, 0x5f, 0x2e, 0x8d, 0xb5, 0x77, 0xa9, 0xa1, 0x5c,
+	0xa4, 0x86, 0xf2, 0x39, 0x35, 0x94, 0x6f, 0xa9, 0xa1, 0x7c, 0xf8, 0x6e, 0x28, 0x2f, 0x9f, 0x96,
+	0xfc, 0x27, 0x7a, 0x22, 0x4f, 0x23, 0x2d, 0xdb, 0xcc, 0x47, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff,
+	0x76, 0xdf, 0xe7, 0xaa, 0xd2, 0x06, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/events/v1/task.proto b/vendor/github.com/containerd/containerd/api/services/events/v1/task.proto
new file mode 100644
index 0000000..79f87a6
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/events/v1/task.proto
@@ -0,0 +1,75 @@
+syntax = "proto3";
+
+package containerd.services.events.v1;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+import "github.com/containerd/containerd/api/types/mount.proto";
+import "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/events/v1;events";
+option (containerd.plugin.fieldpath_all) = true;
+
+message TaskCreate {
+	string container_id = 1;
+	string bundle = 2;
+	repeated containerd.types.Mount rootfs = 3;
+	TaskIO io = 4 [(gogoproto.customname) = "IO"];
+	string checkpoint = 5;
+	uint32 pid = 6;
+}
+
+message TaskStart {
+	string container_id = 1;
+	uint32 pid = 2;
+}
+
+message TaskDelete {
+	string container_id = 1;
+	uint32 pid = 2;
+	uint32 exit_status = 3;
+	google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message TaskIO {
+	string stdin = 1;
+	string stdout = 2;
+	string stderr = 3;
+	bool terminal = 4;
+}
+
+message TaskExit {
+	string container_id = 1;
+	string id = 2;
+	uint32 pid = 3;
+	uint32 exit_status = 4;
+	google.protobuf.Timestamp exited_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message TaskOOM {
+	string container_id = 1;
+}
+
+message TaskExecAdded {
+	string container_id = 1;
+	string exec_id = 2;
+}
+
+message TaskExecStarted {
+	string container_id = 1;
+	string exec_id = 2;
+	uint32 pid = 3;
+}
+
+message TaskPaused {
+	string container_id = 1;
+}
+
+message TaskResumed {
+	string container_id = 1;
+}
+
+message TaskCheckpointed {
+	string container_id = 1;
+	string checkpoint = 2;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/docs.go b/vendor/github.com/containerd/containerd/api/services/images/v1/docs.go
new file mode 100644
index 0000000..a8d61a3
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/images/v1/docs.go
@@ -0,0 +1 @@
+package images
diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
new file mode 100644
index 0000000..41b97df
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.pb.go
@@ -0,0 +1,2191 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/images/v1/images.proto
+// DO NOT EDIT!
+
+/*
+	Package images is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/services/images/v1/images.proto
+
+	It has these top-level messages:
+		Image
+		GetImageRequest
+		GetImageResponse
+		CreateImageRequest
+		CreateImageResponse
+		UpdateImageRequest
+		UpdateImageResponse
+		ListImagesRequest
+		ListImagesResponse
+		DeleteImageRequest
+*/
+package images
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf2 "github.com/gogo/protobuf/types"
+import _ "github.com/gogo/protobuf/types"
+import containerd_types "github.com/containerd/containerd/api/types"
+
+import time "time"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type Image struct {
+	// Name provides a unique name for the image.
+	//
+	// Containerd treats this as the primary identifier.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Labels provides free form labels for the image. These are runtime only
+	// and do not get inherited into the package image in any way.
+	//
+	// Labels may be updated using the field mask.
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Target describes the content entry point of the image.
+	Target containerd_types.Descriptor `protobuf:"bytes,3,opt,name=target" json:"target"`
+	// CreatedAt is the time the image was first created.
+	CreatedAt time.Time `protobuf:"bytes,7,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
+	// UpdatedAt is the last time the image was mutated.
+	UpdatedAt time.Time `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+}
+
+func (m *Image) Reset()                    { *m = Image{} }
+func (*Image) ProtoMessage()               {}
+func (*Image) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{0} }
+
+type GetImageRequest struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *GetImageRequest) Reset()                    { *m = GetImageRequest{} }
+func (*GetImageRequest) ProtoMessage()               {}
+func (*GetImageRequest) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{1} }
+
+type GetImageResponse struct {
+	Image *Image `protobuf:"bytes,1,opt,name=image" json:"image,omitempty"`
+}
+
+func (m *GetImageResponse) Reset()                    { *m = GetImageResponse{} }
+func (*GetImageResponse) ProtoMessage()               {}
+func (*GetImageResponse) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{2} }
+
+type CreateImageRequest struct {
+	Image Image `protobuf:"bytes,1,opt,name=image" json:"image"`
+}
+
+func (m *CreateImageRequest) Reset()                    { *m = CreateImageRequest{} }
+func (*CreateImageRequest) ProtoMessage()               {}
+func (*CreateImageRequest) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{3} }
+
+type CreateImageResponse struct {
+	Image Image `protobuf:"bytes,1,opt,name=image" json:"image"`
+}
+
+func (m *CreateImageResponse) Reset()                    { *m = CreateImageResponse{} }
+func (*CreateImageResponse) ProtoMessage()               {}
+func (*CreateImageResponse) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{4} }
+
+type UpdateImageRequest struct {
+	// Image provides a full or partial image for update.
+	//
+	// The name field must be set or an error will be returned.
+	Image Image `protobuf:"bytes,1,opt,name=image" json:"image"`
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+}
+
+func (m *UpdateImageRequest) Reset()                    { *m = UpdateImageRequest{} }
+func (*UpdateImageRequest) ProtoMessage()               {}
+func (*UpdateImageRequest) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{5} }
+
+type UpdateImageResponse struct {
+	Image Image `protobuf:"bytes,1,opt,name=image" json:"image"`
+}
+
+func (m *UpdateImageResponse) Reset()                    { *m = UpdateImageResponse{} }
+func (*UpdateImageResponse) ProtoMessage()               {}
+func (*UpdateImageResponse) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{6} }
+
+type ListImagesRequest struct {
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, images that match the following will be
+	// returned:
+	//
+	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
+	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+}
+
+func (m *ListImagesRequest) Reset()                    { *m = ListImagesRequest{} }
+func (*ListImagesRequest) ProtoMessage()               {}
+func (*ListImagesRequest) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{7} }
+
+type ListImagesResponse struct {
+	Images []Image `protobuf:"bytes,1,rep,name=images" json:"images"`
+}
+
+func (m *ListImagesResponse) Reset()                    { *m = ListImagesResponse{} }
+func (*ListImagesResponse) ProtoMessage()               {}
+func (*ListImagesResponse) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{8} }
+
+type DeleteImageRequest struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *DeleteImageRequest) Reset()                    { *m = DeleteImageRequest{} }
+func (*DeleteImageRequest) ProtoMessage()               {}
+func (*DeleteImageRequest) Descriptor() ([]byte, []int) { return fileDescriptorImages, []int{9} }
+
+func init() {
+	proto.RegisterType((*Image)(nil), "containerd.services.images.v1.Image")
+	proto.RegisterType((*GetImageRequest)(nil), "containerd.services.images.v1.GetImageRequest")
+	proto.RegisterType((*GetImageResponse)(nil), "containerd.services.images.v1.GetImageResponse")
+	proto.RegisterType((*CreateImageRequest)(nil), "containerd.services.images.v1.CreateImageRequest")
+	proto.RegisterType((*CreateImageResponse)(nil), "containerd.services.images.v1.CreateImageResponse")
+	proto.RegisterType((*UpdateImageRequest)(nil), "containerd.services.images.v1.UpdateImageRequest")
+	proto.RegisterType((*UpdateImageResponse)(nil), "containerd.services.images.v1.UpdateImageResponse")
+	proto.RegisterType((*ListImagesRequest)(nil), "containerd.services.images.v1.ListImagesRequest")
+	proto.RegisterType((*ListImagesResponse)(nil), "containerd.services.images.v1.ListImagesResponse")
+	proto.RegisterType((*DeleteImageRequest)(nil), "containerd.services.images.v1.DeleteImageRequest")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Images service
+
+type ImagesClient interface {
+	// Get returns an image by name.
+	Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*GetImageResponse, error)
+	// List returns a list of all images known to containerd.
+	List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error)
+	// Create an image record in the metadata store.
+	//
+	// The name of the image must be unique.
+	Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*CreateImageResponse, error)
+	// Update assigns the name to a given target image based on the provided
+	// image.
+	Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*UpdateImageResponse, error)
+	// Delete deletes the image by name.
+	Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+}
+
+type imagesClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewImagesClient(cc *grpc.ClientConn) ImagesClient {
+	return &imagesClient{cc}
+}
+
+func (c *imagesClient) Get(ctx context.Context, in *GetImageRequest, opts ...grpc.CallOption) (*GetImageResponse, error) {
+	out := new(GetImageResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.images.v1.Images/Get", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *imagesClient) List(ctx context.Context, in *ListImagesRequest, opts ...grpc.CallOption) (*ListImagesResponse, error) {
+	out := new(ListImagesResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.images.v1.Images/List", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *imagesClient) Create(ctx context.Context, in *CreateImageRequest, opts ...grpc.CallOption) (*CreateImageResponse, error) {
+	out := new(CreateImageResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.images.v1.Images/Create", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *imagesClient) Update(ctx context.Context, in *UpdateImageRequest, opts ...grpc.CallOption) (*UpdateImageResponse, error) {
+	out := new(UpdateImageResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.images.v1.Images/Update", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *imagesClient) Delete(ctx context.Context, in *DeleteImageRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
+	out := new(google_protobuf1.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.images.v1.Images/Delete", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for Images service
+
+type ImagesServer interface {
+	// Get returns an image by name.
+	Get(context.Context, *GetImageRequest) (*GetImageResponse, error)
+	// List returns a list of all images known to containerd.
+	List(context.Context, *ListImagesRequest) (*ListImagesResponse, error)
+	// Create an image record in the metadata store.
+	//
+	// The name of the image must be unique.
+	Create(context.Context, *CreateImageRequest) (*CreateImageResponse, error)
+	// Update assigns the name to a given target image based on the provided
+	// image.
+	Update(context.Context, *UpdateImageRequest) (*UpdateImageResponse, error)
+	// Delete deletes the image by name.
+	Delete(context.Context, *DeleteImageRequest) (*google_protobuf1.Empty, error)
+}
+
+func RegisterImagesServer(s *grpc.Server, srv ImagesServer) {
+	s.RegisterService(&_Images_serviceDesc, srv)
+}
+
+func _Images_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetImageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ImagesServer).Get(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.images.v1.Images/Get",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ImagesServer).Get(ctx, req.(*GetImageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Images_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListImagesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ImagesServer).List(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.images.v1.Images/List",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ImagesServer).List(ctx, req.(*ListImagesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Images_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateImageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ImagesServer).Create(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.images.v1.Images/Create",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ImagesServer).Create(ctx, req.(*CreateImageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Images_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateImageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ImagesServer).Update(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.images.v1.Images/Update",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ImagesServer).Update(ctx, req.(*UpdateImageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Images_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteImageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ImagesServer).Delete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.images.v1.Images/Delete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ImagesServer).Delete(ctx, req.(*DeleteImageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Images_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "containerd.services.images.v1.Images",
+	HandlerType: (*ImagesServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Get",
+			Handler:    _Images_Get_Handler,
+		},
+		{
+			MethodName: "List",
+			Handler:    _Images_List_Handler,
+		},
+		{
+			MethodName: "Create",
+			Handler:    _Images_Create_Handler,
+		},
+		{
+			MethodName: "Update",
+			Handler:    _Images_Update_Handler,
+		},
+		{
+			MethodName: "Delete",
+			Handler:    _Images_Delete_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/containerd/containerd/api/services/images/v1/images.proto",
+}
+
+func (m *Image) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Image) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintImages(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x12
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovImages(uint64(len(k))) + 1 + len(v) + sovImages(uint64(len(v)))
+			i = encodeVarintImages(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintImages(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintImages(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	dAtA[i] = 0x1a
+	i++
+	i = encodeVarintImages(dAtA, i, uint64(m.Target.Size()))
+	n1, err := m.Target.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n1
+	dAtA[i] = 0x3a
+	i++
+	i = encodeVarintImages(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
+	n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n2
+	dAtA[i] = 0x42
+	i++
+	i = encodeVarintImages(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+	n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n3
+	return i, nil
+}
+
+func (m *GetImageRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GetImageRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintImages(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	return i, nil
+}
+
+func (m *GetImageResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GetImageResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Image != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintImages(dAtA, i, uint64(m.Image.Size()))
+		n4, err := m.Image.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n4
+	}
+	return i, nil
+}
+
+func (m *CreateImageRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateImageRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintImages(dAtA, i, uint64(m.Image.Size()))
+	n5, err := m.Image.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n5
+	return i, nil
+}
+
+func (m *CreateImageResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateImageResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintImages(dAtA, i, uint64(m.Image.Size()))
+	n6, err := m.Image.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n6
+	return i, nil
+}
+
+func (m *UpdateImageRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateImageRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintImages(dAtA, i, uint64(m.Image.Size()))
+	n7, err := m.Image.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n7
+	if m.UpdateMask != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintImages(dAtA, i, uint64(m.UpdateMask.Size()))
+		n8, err := m.UpdateMask.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n8
+	}
+	return i, nil
+}
+
+func (m *UpdateImageResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateImageResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintImages(dAtA, i, uint64(m.Image.Size()))
+	n9, err := m.Image.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n9
+	return i, nil
+}
+
+func (m *ListImagesRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListImagesRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			dAtA[i] = 0xa
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	return i, nil
+}
+
+func (m *ListImagesResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListImagesResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Images) > 0 {
+		for _, msg := range m.Images {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintImages(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *DeleteImageRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteImageRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintImages(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	return i, nil
+}
+
+func encodeFixed64Images(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Images(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintImages(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Image) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovImages(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovImages(uint64(len(k))) + 1 + len(v) + sovImages(uint64(len(v)))
+			n += mapEntrySize + 1 + sovImages(uint64(mapEntrySize))
+		}
+	}
+	l = m.Target.Size()
+	n += 1 + l + sovImages(uint64(l))
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
+	n += 1 + l + sovImages(uint64(l))
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
+	n += 1 + l + sovImages(uint64(l))
+	return n
+}
+
+func (m *GetImageRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovImages(uint64(l))
+	}
+	return n
+}
+
+func (m *GetImageResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.Image != nil {
+		l = m.Image.Size()
+		n += 1 + l + sovImages(uint64(l))
+	}
+	return n
+}
+
+func (m *CreateImageRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Image.Size()
+	n += 1 + l + sovImages(uint64(l))
+	return n
+}
+
+func (m *CreateImageResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Image.Size()
+	n += 1 + l + sovImages(uint64(l))
+	return n
+}
+
+func (m *UpdateImageRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Image.Size()
+	n += 1 + l + sovImages(uint64(l))
+	if m.UpdateMask != nil {
+		l = m.UpdateMask.Size()
+		n += 1 + l + sovImages(uint64(l))
+	}
+	return n
+}
+
+func (m *UpdateImageResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Image.Size()
+	n += 1 + l + sovImages(uint64(l))
+	return n
+}
+
+func (m *ListImagesRequest) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			l = len(s)
+			n += 1 + l + sovImages(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ListImagesResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Images) > 0 {
+		for _, e := range m.Images {
+			l = e.Size()
+			n += 1 + l + sovImages(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *DeleteImageRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovImages(uint64(l))
+	}
+	return n
+}
+
+func sovImages(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozImages(x uint64) (n int) {
+	return sovImages(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Image) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&Image{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`Target:` + strings.Replace(strings.Replace(this.Target.String(), "Descriptor", "containerd_types.Descriptor", 1), `&`, ``, 1) + `,`,
+		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GetImageRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GetImageRequest{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GetImageResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GetImageResponse{`,
+		`Image:` + strings.Replace(fmt.Sprintf("%v", this.Image), "Image", "Image", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CreateImageRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateImageRequest{`,
+		`Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CreateImageResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateImageResponse{`,
+		`Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateImageRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateImageRequest{`,
+		`Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`,
+		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "google_protobuf2.FieldMask", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateImageResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateImageResponse{`,
+		`Image:` + strings.Replace(strings.Replace(this.Image.String(), "Image", "Image", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListImagesRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListImagesRequest{`,
+		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListImagesResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListImagesResponse{`,
+		`Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "Image", "Image", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeleteImageRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeleteImageRequest{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringImages(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Image) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImages
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Image: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Image: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthImages
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowImages
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowImages
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthImages
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImages(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImages
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GetImageRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImages
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GetImageRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GetImageRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImages(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImages
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GetImageResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImages
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GetImageResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GetImageResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Image == nil {
+				m.Image = &Image{}
+			}
+			if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImages(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImages
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CreateImageRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImages
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CreateImageRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CreateImageRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImages(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImages
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CreateImageResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImages
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CreateImageResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CreateImageResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImages(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImages
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UpdateImageRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImages
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UpdateImageRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UpdateImageRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.UpdateMask == nil {
+				m.UpdateMask = &google_protobuf2.FieldMask{}
+			}
+			if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImages(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImages
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UpdateImageResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImages
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UpdateImageResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UpdateImageResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Image.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImages(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImages
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListImagesRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImages
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListImagesRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListImagesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImages(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImages
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListImagesResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImages
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListImagesResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListImagesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Images = append(m.Images, Image{})
+			if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImages(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImages
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteImageRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowImages
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteImageRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteImageRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthImages
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipImages(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthImages
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipImages(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowImages
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowImages
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthImages
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowImages
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipImages(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthImages = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowImages   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/images/v1/images.proto", fileDescriptorImages)
+}
+
+var fileDescriptorImages = []byte{
+	// 648 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4f, 0x6f, 0xd3, 0x4e,
+	0x10, 0xad, 0x93, 0xd4, 0x6d, 0x27, 0x87, 0x5f, 0x7f, 0x4b, 0x85, 0x2c, 0x03, 0x69, 0x14, 0x81,
+	0x94, 0x0b, 0x6b, 0x1a, 0x2e, 0xd0, 0x4a, 0x88, 0xa6, 0x2d, 0x05, 0xa9, 0x70, 0x30, 0xff, 0x2a,
+	0x2e, 0xd5, 0x26, 0x99, 0x18, 0x2b, 0x76, 0x6c, 0xbc, 0x9b, 0x48, 0xb9, 0xf1, 0x11, 0x90, 0xe0,
+	0x43, 0xf5, 0xc8, 0x91, 0x13, 0xd0, 0x1c, 0xf8, 0x1c, 0xc8, 0xbb, 0x1b, 0x9a, 0x26, 0x11, 0x4e,
+	0x4a, 0x6f, 0xe3, 0xf8, 0xbd, 0x79, 0x33, 0x6f, 0x66, 0x62, 0xd8, 0xf7, 0x7c, 0xf1, 0xbe, 0xd7,
+	0xa0, 0xcd, 0x28, 0x74, 0x9a, 0x51, 0x57, 0x30, 0xbf, 0x8b, 0x49, 0x6b, 0x3c, 0x64, 0xb1, 0xef,
+	0x70, 0x4c, 0xfa, 0x7e, 0x13, 0xb9, 0xe3, 0x87, 0xcc, 0x43, 0xee, 0xf4, 0xb7, 0x74, 0x44, 0xe3,
+	0x24, 0x12, 0x11, 0xb9, 0x75, 0x8e, 0xa7, 0x23, 0x2c, 0xd5, 0x88, 0xfe, 0x96, 0xbd, 0xe1, 0x45,
+	0x5e, 0x24, 0x91, 0x4e, 0x1a, 0x29, 0x92, 0x7d, 0xc3, 0x8b, 0x22, 0x2f, 0x40, 0x47, 0x3e, 0x35,
+	0x7a, 0x6d, 0x07, 0xc3, 0x58, 0x0c, 0xf4, 0xcb, 0xf2, 0xe4, 0xcb, 0xb6, 0x8f, 0x41, 0xeb, 0x24,
+	0x64, 0xbc, 0xa3, 0x11, 0x9b, 0x93, 0x08, 0xe1, 0x87, 0xc8, 0x05, 0x0b, 0x63, 0x0d, 0xd8, 0x99,
+	0xab, 0x35, 0x31, 0x88, 0x91, 0x3b, 0x2d, 0xe4, 0xcd, 0xc4, 0x8f, 0x45, 0x94, 0x28, 0x72, 0xe5,
+	0x57, 0x0e, 0x96, 0x9f, 0xa5, 0x0d, 0x10, 0x02, 0x85, 0x2e, 0x0b, 0xd1, 0x32, 0xca, 0x46, 0x75,
+	0xcd, 0x95, 0x31, 0x79, 0x0a, 0x66, 0xc0, 0x1a, 0x18, 0x70, 0x2b, 0x57, 0xce, 0x57, 0x8b, 0xb5,
+	0x7b, 0xf4, 0xaf, 0x06, 0x50, 0x99, 0x89, 0x1e, 0x49, 0xca, 0x41, 0x57, 0x24, 0x03, 0x57, 0xf3,
+	0xc9, 0x36, 0x98, 0x82, 0x25, 0x1e, 0x0a, 0x2b, 0x5f, 0x36, 0xaa, 0xc5, 0xda, 0xcd, 0xf1, 0x4c,
+	0xb2, 0x36, 0xba, 0xff, 0xa7, 0xb6, 0x7a, 0xe1, 0xf4, 0xfb, 0xe6, 0x92, 0xab, 0x19, 0x64, 0x0f,
+	0xa0, 0x99, 0x20, 0x13, 0xd8, 0x3a, 0x61, 0xc2, 0x5a, 0x91, 0x7c, 0x9b, 0x2a, 0x5b, 0xe8, 0xc8,
+	0x16, 0xfa, 0x6a, 0x64, 0x4b, 0x7d, 0x35, 0x65, 0x7f, 0xfa, 0xb1, 0x69, 0xb8, 0x6b, 0x9a, 0xb7,
+	0x2b, 0x93, 0xf4, 0xe2, 0xd6, 0x28, 0xc9, 0xea, 0x22, 0x49, 0x34, 0x6f, 0x57, 0xd8, 0x0f, 0xa1,
+	0x38, 0xd6, 0x1c, 0x59, 0x87, 0x7c, 0x07, 0x07, 0xda, 0xb1, 0x34, 0x24, 0x1b, 0xb0, 0xdc, 0x67,
+	0x41, 0x0f, 0xad, 0x9c, 0xfc, 0x4d, 0x3d, 0x6c, 0xe7, 0x1e, 0x18, 0x95, 0x3b, 0xf0, 0xdf, 0x21,
+	0x0a, 0x69, 0x90, 0x8b, 0x1f, 0x7a, 0xc8, 0xc5, 0x2c, 0xc7, 0x2b, 0x2f, 0x60, 0xfd, 0x1c, 0xc6,
+	0xe3, 0xa8, 0xcb, 0x91, 0x6c, 0xc3, 0xb2, 0xb4, 0x58, 0x02, 0x8b, 0xb5, 0xdb, 0xf3, 0x0c, 0xc1,
+	0x55, 0x94, 0xca, 0x1b, 0x20, 0x7b, 0xd2, 0x83, 0x0b, 0xca, 0x8f, 0x2f, 0x91, 0x51, 0x0f, 0x45,
+	0xe7, 0x7d, 0x0b, 0xd7, 0x2e, 0xe4, 0xd5, 0xa5, 0xfe, 0x7b, 0xe2, 0xcf, 0x06, 0x90, 0xd7, 0xd2,
+	0xf0, 0xab, 0xad, 0x98, 0xec, 0x40, 0x51, 0x0d, 0x52, 0x1e, 0x97, 0x1c, 0xd0, 0xac, 0x0d, 0x78,
+	0x92, 0xde, 0xdf, 0x73, 0xc6, 0x3b, 0xae, 0xde, 0x97, 0x34, 0x4e, 0xdb, 0xbd, 0x50, 0xd4, 0x95,
+	0xb5, 0x7b, 0x17, 0xfe, 0x3f, 0xf2, 0xb9, 0x1a, 0x38, 0x1f, 0x35, 0x6b, 0xc1, 0x4a, 0xdb, 0x0f,
+	0x04, 0x26, 0xdc, 0x32, 0xca, 0xf9, 0xea, 0x9a, 0x3b, 0x7a, 0xac, 0x1c, 0x03, 0x19, 0x87, 0xeb,
+	0x32, 0xea, 0x60, 0x2a, 0x11, 0x09, 0x5f, 0xac, 0x0e, 0xcd, 0xac, 0x54, 0x81, 0xec, 0x63, 0x80,
+	0x13, 0xb6, 0xcf, 0x58, 0xd1, 0xda, 0x97, 0x02, 0x98, 0xaa, 0x00, 0xd2, 0x86, 0xfc, 0x21, 0x0a,
+	0x42, 0x33, 0xf4, 0x26, 0x16, 0xdf, 0x76, 0xe6, 0xc6, 0xeb, 0x06, 0x3b, 0x50, 0x48, 0xdb, 0x26,
+	0x59, 0xff, 0x3f, 0x53, 0x56, 0xda, 0x5b, 0x0b, 0x30, 0xb4, 0x58, 0x04, 0xa6, 0x5a, 0x6d, 0x92,
+	0x45, 0x9e, 0xbe, 0x2c, 0xbb, 0xb6, 0x08, 0xe5, 0x5c, 0x50, 0x2d, 0x57, 0xa6, 0xe0, 0xf4, 0x61,
+	0x64, 0x0a, 0xce, 0x5a, 0xdb, 0x97, 0x60, 0xaa, 0x59, 0x67, 0x0a, 0x4e, 0xaf, 0x84, 0x7d, 0x7d,
+	0xea, 0x64, 0x0e, 0xd2, 0xef, 0x59, 0xfd, 0xf8, 0xf4, 0xac, 0xb4, 0xf4, 0xed, 0xac, 0xb4, 0xf4,
+	0x71, 0x58, 0x32, 0x4e, 0x87, 0x25, 0xe3, 0xeb, 0xb0, 0x64, 0xfc, 0x1c, 0x96, 0x8c, 0x77, 0x8f,
+	0x2e, 0xf9, 0xed, 0xdd, 0x51, 0x51, 0xc3, 0x94, 0x4a, 0xf7, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff,
+	0x74, 0x4c, 0xf0, 0x24, 0xc4, 0x07, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto b/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto
new file mode 100644
index 0000000..d6b6b83
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/images/v1/images.proto
@@ -0,0 +1,118 @@
+syntax = "proto3";
+
+package containerd.services.images.v1;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "github.com/containerd/containerd/api/types/descriptor.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/images/v1;images";
+
+// Images is a service that allows one to register images with containerd.
+//
+// In containerd, an image is merely the mapping of a name to a content root,
+// described by a descriptor. The behavior and state of image is purely
+// dictated by the type of the descriptor.
+//
+// From the perspective of this service, these references are mostly shallow,
+// in that the existence of the required content won't be validated until
+// required by consuming services.
+//
+// As such, this can really be considered a "metadata service".
+service Images {
+	// Get returns an image by name.
+	rpc Get(GetImageRequest) returns (GetImageResponse);
+
+	// List returns a list of all images known to containerd.
+	rpc List(ListImagesRequest) returns (ListImagesResponse);
+
+	// Create an image record in the metadata store.
+	//
+	// The name of the image must be unique.
+	rpc Create(CreateImageRequest) returns (CreateImageResponse);
+
+	// Update assigns the name to a given target image based on the provided
+	// image.
+	rpc Update(UpdateImageRequest) returns (UpdateImageResponse);
+
+	// Delete deletes the image by name.
+	rpc Delete(DeleteImageRequest) returns (google.protobuf.Empty);
+}
+
+message Image {
+	// Name provides a unique name for the image.
+	//
+	// Containerd treats this as the primary identifier.
+	string name = 1;
+
+	// Labels provides free form labels for the image. These are runtime only
+	// and do not get inherited into the package image in any way.
+	//
+	// Labels may be updated using the field mask.
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	map<string, string> labels = 2;
+
+	// Target describes the content entry point of the image.
+	containerd.types.Descriptor target = 3 [(gogoproto.nullable) = false];
+
+	// CreatedAt is the time the image was first created.
+	google.protobuf.Timestamp created_at = 7 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// UpdatedAt is the last time the image was mutated.
+	google.protobuf.Timestamp updated_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message GetImageRequest {
+	string name = 1;
+}
+
+message GetImageResponse {
+	Image image = 1;
+}
+
+message CreateImageRequest {
+	Image image = 1 [(gogoproto.nullable) = false];
+}
+
+message CreateImageResponse {
+	Image image = 1 [(gogoproto.nullable) = false];
+}
+
+message UpdateImageRequest {
+	// Image provides a full or partial image for update.
+	//
+	// The name field must be set or an error will be returned.
+	Image image = 1 [(gogoproto.nullable) = false];
+
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	google.protobuf.FieldMask update_mask = 2;
+}
+
+message UpdateImageResponse {
+	Image image = 1 [(gogoproto.nullable) = false];
+}
+
+message ListImagesRequest {
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, images that match the following will be
+	// returned:
+	//
+	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
+	repeated string filters = 1;
+}
+
+message ListImagesResponse {
+	repeated Image images = 1 [(gogoproto.nullable) = false];
+}
+
+message DeleteImageRequest {
+	string name = 1;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/doc.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/doc.go
new file mode 100644
index 0000000..3b9d794
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/doc.go
@@ -0,0 +1 @@
+package introspection
diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
new file mode 100644
index 0000000..9e61bf7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.pb.go
@@ -0,0 +1,1175 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
+// DO NOT EDIT!
+
+/*
+	Package introspection is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
+
+	It has these top-level messages:
+		Plugin
+		PluginsRequest
+		PluginsResponse
+*/
+package introspection
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import containerd_types "github.com/containerd/containerd/api/types"
+import google_rpc "github.com/containerd/containerd/protobuf/google/rpc"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import strings "strings"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type Plugin struct {
+	// Type defines the type of plugin.
+	//
+	// See package plugin for a list of possible values. Non core plugins may
+	// define their own values during registration.
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	// ID identifies the plugin uniquely in the system.
+	ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+	// Requires lists the plugin types required by this plugin.
+	Requires []string `protobuf:"bytes,3,rep,name=requires" json:"requires,omitempty"`
+	// Platforms enumerates the platforms this plugin will support.
+	//
+	// If values are provided here, the plugin will only be operable under the
+	// provided platforms.
+	//
+	// If this is empty, the plugin will work across all platforms.
+	//
+	// If the plugin prefers certain platforms over others, they should be
+	// listed from most to least preferred.
+	Platforms []containerd_types.Platform `protobuf:"bytes,4,rep,name=platforms" json:"platforms"`
+	// Exports allows plugins to provide values about state or configuration to
+	// interested parties.
+	//
+	// One example is exposing the configured path of a snapshotter plugin.
+	Exports map[string]string `protobuf:"bytes,5,rep,name=exports" json:"exports,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Capabilities allows plugins to communicate feature switches to allow
+	// clients to detect features that may not be on be default or may be
+	// different from version to version.
+	//
+	// Use this sparingly.
+	Capabilities []string `protobuf:"bytes,6,rep,name=capabilities" json:"capabilities,omitempty"`
+	// InitErr will be set if the plugin fails initialization.
+	//
+	// This means the plugin may have been registered but a non-terminal error
+	// was encountered during initialization.
+	//
+	// Plugins that have this value set cannot be used.
+	InitErr *google_rpc.Status `protobuf:"bytes,7,opt,name=init_err,json=initErr" json:"init_err,omitempty"`
+}
+
+func (m *Plugin) Reset()                    { *m = Plugin{} }
+func (*Plugin) ProtoMessage()               {}
+func (*Plugin) Descriptor() ([]byte, []int) { return fileDescriptorIntrospection, []int{0} }
+
+type PluginsRequest struct {
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, plugins that match the following will be
+	// returned:
+	//
+	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
+	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+}
+
+func (m *PluginsRequest) Reset()                    { *m = PluginsRequest{} }
+func (*PluginsRequest) ProtoMessage()               {}
+func (*PluginsRequest) Descriptor() ([]byte, []int) { return fileDescriptorIntrospection, []int{1} }
+
+type PluginsResponse struct {
+	Plugins []Plugin `protobuf:"bytes,1,rep,name=plugins" json:"plugins"`
+}
+
+func (m *PluginsResponse) Reset()                    { *m = PluginsResponse{} }
+func (*PluginsResponse) ProtoMessage()               {}
+func (*PluginsResponse) Descriptor() ([]byte, []int) { return fileDescriptorIntrospection, []int{2} }
+
+func init() {
+	proto.RegisterType((*Plugin)(nil), "containerd.services.introspection.v1.Plugin")
+	proto.RegisterType((*PluginsRequest)(nil), "containerd.services.introspection.v1.PluginsRequest")
+	proto.RegisterType((*PluginsResponse)(nil), "containerd.services.introspection.v1.PluginsResponse")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Introspection service
+
+type IntrospectionClient interface {
+	// Plugins returns a list of plugins in containerd.
+	//
+	// Clients can use this to detect features and capabilities when using
+	// containerd.
+	Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error)
+}
+
+type introspectionClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewIntrospectionClient(cc *grpc.ClientConn) IntrospectionClient {
+	return &introspectionClient{cc}
+}
+
+func (c *introspectionClient) Plugins(ctx context.Context, in *PluginsRequest, opts ...grpc.CallOption) (*PluginsResponse, error) {
+	out := new(PluginsResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.introspection.v1.Introspection/Plugins", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for Introspection service
+
+type IntrospectionServer interface {
+	// Plugins returns a list of plugins in containerd.
+	//
+	// Clients can use this to detect features and capabilities when using
+	// containerd.
+	Plugins(context.Context, *PluginsRequest) (*PluginsResponse, error)
+}
+
+func RegisterIntrospectionServer(s *grpc.Server, srv IntrospectionServer) {
+	s.RegisterService(&_Introspection_serviceDesc, srv)
+}
+
+func _Introspection_Plugins_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PluginsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(IntrospectionServer).Plugins(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.introspection.v1.Introspection/Plugins",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(IntrospectionServer).Plugins(ctx, req.(*PluginsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Introspection_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "containerd.services.introspection.v1.Introspection",
+	HandlerType: (*IntrospectionServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Plugins",
+			Handler:    _Introspection_Plugins_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/containerd/containerd/api/services/introspection/v1/introspection.proto",
+}
+
+func (m *Plugin) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Plugin) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Type) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintIntrospection(dAtA, i, uint64(len(m.Type)))
+		i += copy(dAtA[i:], m.Type)
+	}
+	if len(m.ID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintIntrospection(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if len(m.Requires) > 0 {
+		for _, s := range m.Requires {
+			dAtA[i] = 0x1a
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.Platforms) > 0 {
+		for _, msg := range m.Platforms {
+			dAtA[i] = 0x22
+			i++
+			i = encodeVarintIntrospection(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if len(m.Exports) > 0 {
+		for k, _ := range m.Exports {
+			dAtA[i] = 0x2a
+			i++
+			v := m.Exports[k]
+			mapSize := 1 + len(k) + sovIntrospection(uint64(len(k))) + 1 + len(v) + sovIntrospection(uint64(len(v)))
+			i = encodeVarintIntrospection(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintIntrospection(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintIntrospection(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	if len(m.Capabilities) > 0 {
+		for _, s := range m.Capabilities {
+			dAtA[i] = 0x32
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if m.InitErr != nil {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintIntrospection(dAtA, i, uint64(m.InitErr.Size()))
+		n1, err := m.InitErr.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n1
+	}
+	return i, nil
+}
+
+func (m *PluginsRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PluginsRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			dAtA[i] = 0xa
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	return i, nil
+}
+
+func (m *PluginsResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PluginsResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Plugins) > 0 {
+		for _, msg := range m.Plugins {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintIntrospection(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func encodeFixed64Introspection(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Introspection(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintIntrospection(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Plugin) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Type)
+	if l > 0 {
+		n += 1 + l + sovIntrospection(uint64(l))
+	}
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovIntrospection(uint64(l))
+	}
+	if len(m.Requires) > 0 {
+		for _, s := range m.Requires {
+			l = len(s)
+			n += 1 + l + sovIntrospection(uint64(l))
+		}
+	}
+	if len(m.Platforms) > 0 {
+		for _, e := range m.Platforms {
+			l = e.Size()
+			n += 1 + l + sovIntrospection(uint64(l))
+		}
+	}
+	if len(m.Exports) > 0 {
+		for k, v := range m.Exports {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovIntrospection(uint64(len(k))) + 1 + len(v) + sovIntrospection(uint64(len(v)))
+			n += mapEntrySize + 1 + sovIntrospection(uint64(mapEntrySize))
+		}
+	}
+	if len(m.Capabilities) > 0 {
+		for _, s := range m.Capabilities {
+			l = len(s)
+			n += 1 + l + sovIntrospection(uint64(l))
+		}
+	}
+	if m.InitErr != nil {
+		l = m.InitErr.Size()
+		n += 1 + l + sovIntrospection(uint64(l))
+	}
+	return n
+}
+
+func (m *PluginsRequest) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			l = len(s)
+			n += 1 + l + sovIntrospection(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PluginsResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Plugins) > 0 {
+		for _, e := range m.Plugins {
+			l = e.Size()
+			n += 1 + l + sovIntrospection(uint64(l))
+		}
+	}
+	return n
+}
+
+func sovIntrospection(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozIntrospection(x uint64) (n int) {
+	return sovIntrospection(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Plugin) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForExports := make([]string, 0, len(this.Exports))
+	for k, _ := range this.Exports {
+		keysForExports = append(keysForExports, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForExports)
+	mapStringForExports := "map[string]string{"
+	for _, k := range keysForExports {
+		mapStringForExports += fmt.Sprintf("%v: %v,", k, this.Exports[k])
+	}
+	mapStringForExports += "}"
+	s := strings.Join([]string{`&Plugin{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Requires:` + fmt.Sprintf("%v", this.Requires) + `,`,
+		`Platforms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Platforms), "Platform", "containerd_types.Platform", 1), `&`, ``, 1) + `,`,
+		`Exports:` + mapStringForExports + `,`,
+		`Capabilities:` + fmt.Sprintf("%v", this.Capabilities) + `,`,
+		`InitErr:` + strings.Replace(fmt.Sprintf("%v", this.InitErr), "Status", "google_rpc.Status", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PluginsRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PluginsRequest{`,
+		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PluginsResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PluginsResponse{`,
+		`Plugins:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Plugins), "Plugin", "Plugin", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringIntrospection(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Plugin) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowIntrospection
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Plugin: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Plugin: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Requires", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Requires = append(m.Requires, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Platforms", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Platforms = append(m.Platforms, containerd_types.Platform{})
+			if err := m.Platforms[len(m.Platforms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exports", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Exports == nil {
+				m.Exports = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowIntrospection
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowIntrospection
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthIntrospection
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Exports[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Exports[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Capabilities = append(m.Capabilities, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field InitErr", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.InitErr == nil {
+				m.InitErr = &google_rpc.Status{}
+			}
+			if err := m.InitErr.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipIntrospection(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PluginsRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowIntrospection
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PluginsRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PluginsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipIntrospection(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PluginsResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowIntrospection
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PluginsResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PluginsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Plugins", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Plugins = append(m.Plugins, Plugin{})
+			if err := m.Plugins[len(m.Plugins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipIntrospection(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthIntrospection
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipIntrospection(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowIntrospection
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowIntrospection
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthIntrospection
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowIntrospection
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipIntrospection(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthIntrospection = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowIntrospection   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/introspection/v1/introspection.proto", fileDescriptorIntrospection)
+}
+
+var fileDescriptorIntrospection = []byte{
+	// 485 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0x3d, 0x6f, 0xdb, 0x30,
+	0x10, 0x35, 0x65, 0xc7, 0x8a, 0xcf, 0xe9, 0x07, 0x88, 0xa0, 0x15, 0x34, 0x28, 0x86, 0xd1, 0xc1,
+	0x28, 0x5a, 0x0a, 0x71, 0x5b, 0xa0, 0x49, 0x81, 0x0e, 0x46, 0x3d, 0x04, 0xc8, 0x10, 0x28, 0x5b,
+	0x97, 0x40, 0x96, 0x69, 0x95, 0xa8, 0x22, 0x32, 0x24, 0x25, 0xd4, 0x5b, 0xb7, 0xfe, 0x35, 0x8f,
+	0x1d, 0x3b, 0x05, 0x8d, 0x7e, 0x43, 0x7f, 0x40, 0x21, 0x51, 0x4a, 0xec, 0xcd, 0x46, 0xb6, 0xbb,
+	0xa7, 0xf7, 0xee, 0xde, 0x3d, 0x88, 0x10, 0xc4, 0x4c, 0x7f, 0xcb, 0x66, 0x24, 0xe2, 0xd7, 0x7e,
+	0xc4, 0x53, 0x1d, 0xb2, 0x94, 0xca, 0xf9, 0x7a, 0x19, 0x0a, 0xe6, 0x2b, 0x2a, 0x73, 0x16, 0x51,
+	0xe5, 0xb3, 0x54, 0x4b, 0xae, 0x04, 0x8d, 0x34, 0xe3, 0xa9, 0x9f, 0x1f, 0x6f, 0x02, 0x44, 0x48,
+	0xae, 0x39, 0x7e, 0xf5, 0xa0, 0x26, 0x8d, 0x92, 0x6c, 0x12, 0xf3, 0x63, 0xf7, 0x64, 0xab, 0xcd,
+	0x7a, 0x29, 0xa8, 0xf2, 0x45, 0x12, 0xea, 0x05, 0x97, 0xd7, 0x66, 0x81, 0xfb, 0x32, 0xe6, 0x3c,
+	0x4e, 0xa8, 0x2f, 0x45, 0xe4, 0x2b, 0x1d, 0xea, 0x4c, 0xd5, 0x1f, 0x0e, 0x63, 0x1e, 0xf3, 0xaa,
+	0xf4, 0xcb, 0xca, 0xa0, 0xc3, 0x7f, 0x16, 0x74, 0x2f, 0x92, 0x2c, 0x66, 0x29, 0xc6, 0xd0, 0x29,
+	0x27, 0x3a, 0x68, 0x80, 0x46, 0xbd, 0xa0, 0xaa, 0xf1, 0x0b, 0xb0, 0xd8, 0xdc, 0xb1, 0x4a, 0x64,
+	0xd2, 0x2d, 0x6e, 0x8f, 0xac, 0xb3, 0x2f, 0x81, 0xc5, 0xe6, 0xd8, 0x85, 0x7d, 0x49, 0x6f, 0x32,
+	0x26, 0xa9, 0x72, 0xda, 0x83, 0xf6, 0xa8, 0x17, 0xdc, 0xf7, 0xf8, 0x33, 0xf4, 0x1a, 0x4f, 0xca,
+	0xe9, 0x0c, 0xda, 0xa3, 0xfe, 0xd8, 0x25, 0x6b, 0x67, 0x57, 0xb6, 0xc9, 0x45, 0x4d, 0x99, 0x74,
+	0x56, 0xb7, 0x47, 0xad, 0xe0, 0x41, 0x82, 0x2f, 0xc1, 0xa6, 0x3f, 0x04, 0x97, 0x5a, 0x39, 0x7b,
+	0x95, 0xfa, 0x84, 0x6c, 0x13, 0x1a, 0x31, 0x67, 0x90, 0xa9, 0xd1, 0x4e, 0x53, 0x2d, 0x97, 0x41,
+	0x33, 0x09, 0x0f, 0xe1, 0x20, 0x0a, 0x45, 0x38, 0x63, 0x09, 0xd3, 0x8c, 0x2a, 0xa7, 0x5b, 0x99,
+	0xde, 0xc0, 0xf0, 0x5b, 0xd8, 0x67, 0x29, 0xd3, 0x57, 0x54, 0x4a, 0xc7, 0x1e, 0xa0, 0x51, 0x7f,
+	0x8c, 0x89, 0x49, 0x93, 0x48, 0x11, 0x91, 0xcb, 0x2a, 0xcd, 0xc0, 0x2e, 0x39, 0x53, 0x29, 0xdd,
+	0x53, 0x38, 0x58, 0xdf, 0x85, 0x9f, 0x43, 0xfb, 0x3b, 0x5d, 0xd6, 0xf1, 0x95, 0x25, 0x3e, 0x84,
+	0xbd, 0x3c, 0x4c, 0x32, 0x6a, 0x02, 0x0c, 0x4c, 0x73, 0x6a, 0x7d, 0x44, 0xc3, 0xd7, 0xf0, 0xd4,
+	0xd8, 0x55, 0x01, 0xbd, 0xc9, 0xa8, 0xd2, 0xd8, 0x01, 0x7b, 0xc1, 0x12, 0x4d, 0xa5, 0x72, 0x50,
+	0xe5, 0xad, 0x69, 0x87, 0x57, 0xf0, 0xec, 0x9e, 0xab, 0x04, 0x4f, 0x15, 0xc5, 0xe7, 0x60, 0x0b,
+	0x03, 0x55, 0xe4, 0xfe, 0xf8, 0xcd, 0x2e, 0x11, 0xd5, 0x91, 0x37, 0x23, 0xc6, 0xbf, 0x10, 0x3c,
+	0x39, 0x5b, 0xa7, 0xe2, 0x1c, 0xec, 0x7a, 0x25, 0x7e, 0xbf, 0xcb, 0xe4, 0xe6, 0x1a, 0xf7, 0xc3,
+	0x8e, 0x2a, 0x73, 0xd7, 0x64, 0xb1, 0xba, 0xf3, 0x5a, 0x7f, 0xee, 0xbc, 0xd6, 0xcf, 0xc2, 0x43,
+	0xab, 0xc2, 0x43, 0xbf, 0x0b, 0x0f, 0xfd, 0x2d, 0x3c, 0xf4, 0xf5, 0xfc, 0x71, 0x6f, 0xf1, 0xd3,
+	0x06, 0x30, 0xeb, 0x56, 0x3f, 0xff, 0xbb, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0x21, 0xec, 0xef,
+	0x92, 0xe2, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
new file mode 100644
index 0000000..424c731
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/introspection/v1/introspection.proto
@@ -0,0 +1,81 @@
+syntax = "proto3";
+
+package containerd.services.introspection.v1;
+
+import "github.com/containerd/containerd/api/types/platform.proto";
+import "google/rpc/status.proto";
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/introspection/v1;introspection";
+
+service Introspection {
+	// Plugins returns a list of plugins in containerd.
+	//
+	// Clients can use this to detect features and capabilities when using
+	// containerd.
+	rpc Plugins(PluginsRequest) returns (PluginsResponse);
+}
+
+message Plugin {
+	// Type defines the type of plugin.
+	//
+	// See package plugin for a list of possible values. Non core plugins may
+	// define their own values during registration.
+	string type = 1;
+
+	// ID identifies the plugin uniquely in the system.
+	string id = 2;
+
+	// Requires lists the plugin types required by this plugin.
+	repeated string requires = 3;
+
+	// Platforms enumerates the platforms this plugin will support.
+	//
+	// If values are provided here, the plugin will only be operable under the
+	// provided platforms.
+	//
+	// If this is empty, the plugin will work across all platforms.
+	//
+	// If the plugin prefers certain platforms over others, they should be
+	// listed from most to least preferred.
+	repeated types.Platform platforms = 4 [(gogoproto.nullable) = false];
+
+	// Exports allows plugins to provide values about state or configuration to
+	// interested parties.
+	//
+	// One example is exposing the configured path of a snapshotter plugin.
+	map<string, string> exports = 5;
+
+	// Capabilities allows plugins to communicate feature switches to allow
+	// clients to detect features that may not be on be default or may be
+	// different from version to version.
+	//
+	// Use this sparingly.
+	repeated string capabilities = 6;
+
+	// InitErr will be set if the plugin fails initialization.
+	//
+	// This means the plugin may have been registered but a non-terminal error
+	// was encountered during initialization.
+	//
+	// Plugins that have this value set cannot be used.
+	google.rpc.Status init_err = 7;
+}
+
+message PluginsRequest {
+	// Filters contains one or more filters using the syntax defined in the
+	// containerd filter package.
+	//
+	// The returned result will be those that match any of the provided
+	// filters. Expanded, plugins that match the following will be
+	// returned:
+	//
+	//   filters[0] or filters[1] or ... or filters[n-1] or filters[n]
+	//
+	// If filters is zero-length or nil, all items will be returned.
+	repeated string filters = 1;
+}
+
+message PluginsResponse {
+	repeated Plugin plugins = 1 [(gogoproto.nullable) = false];
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go
new file mode 100644
index 0000000..e98330b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.pb.go
@@ -0,0 +1,2012 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
+// DO NOT EDIT!
+
+/*
+	Package namespaces is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
+
+	It has these top-level messages:
+		Namespace
+		GetNamespaceRequest
+		GetNamespaceResponse
+		ListNamespacesRequest
+		ListNamespacesResponse
+		CreateNamespaceRequest
+		CreateNamespaceResponse
+		UpdateNamespaceRequest
+		UpdateNamespaceResponse
+		DeleteNamespaceRequest
+*/
+package namespaces
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf2 "github.com/gogo/protobuf/types"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import strings "strings"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type Namespace struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Labels provides an area to include arbitrary data on namespaces.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	//
+	// Note that to add a new value to this field, read the existing set and
+	// include the entire result in the update call.
+	Labels map[string]string `protobuf:"bytes,2,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *Namespace) Reset()                    { *m = Namespace{} }
+func (*Namespace) ProtoMessage()               {}
+func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{0} }
+
+type GetNamespaceRequest struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *GetNamespaceRequest) Reset()                    { *m = GetNamespaceRequest{} }
+func (*GetNamespaceRequest) ProtoMessage()               {}
+func (*GetNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{1} }
+
+type GetNamespaceResponse struct {
+	Namespace Namespace `protobuf:"bytes,1,opt,name=namespace" json:"namespace"`
+}
+
+func (m *GetNamespaceResponse) Reset()                    { *m = GetNamespaceResponse{} }
+func (*GetNamespaceResponse) ProtoMessage()               {}
+func (*GetNamespaceResponse) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{2} }
+
+type ListNamespacesRequest struct {
+	Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+}
+
+func (m *ListNamespacesRequest) Reset()                    { *m = ListNamespacesRequest{} }
+func (*ListNamespacesRequest) ProtoMessage()               {}
+func (*ListNamespacesRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{3} }
+
+type ListNamespacesResponse struct {
+	Namespaces []Namespace `protobuf:"bytes,1,rep,name=namespaces" json:"namespaces"`
+}
+
+func (m *ListNamespacesResponse) Reset()                    { *m = ListNamespacesResponse{} }
+func (*ListNamespacesResponse) ProtoMessage()               {}
+func (*ListNamespacesResponse) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{4} }
+
+type CreateNamespaceRequest struct {
+	Namespace Namespace `protobuf:"bytes,1,opt,name=namespace" json:"namespace"`
+}
+
+func (m *CreateNamespaceRequest) Reset()                    { *m = CreateNamespaceRequest{} }
+func (*CreateNamespaceRequest) ProtoMessage()               {}
+func (*CreateNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{5} }
+
+type CreateNamespaceResponse struct {
+	Namespace Namespace `protobuf:"bytes,1,opt,name=namespace" json:"namespace"`
+}
+
+func (m *CreateNamespaceResponse) Reset()                    { *m = CreateNamespaceResponse{} }
+func (*CreateNamespaceResponse) ProtoMessage()               {}
+func (*CreateNamespaceResponse) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{6} }
+
+// UpdateNamespaceRequest updates the metadata for a namespace.
+//
+// The operation should follow semantics described in
+// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
+// unless otherwise qualified.
+type UpdateNamespaceRequest struct {
+	// Namespace provides the target value, as declared by the mask, for the update.
+	//
+	// The namespace field must be set.
+	Namespace Namespace `protobuf:"bytes,1,opt,name=namespace" json:"namespace"`
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	//
+	// For the most part, this applies only to selectively updating labels on
+	// the namespace. While field masks are typically limited to ascii alphas
+	// and digits, we just take everything after the "labels." as the map key.
+	UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+}
+
+func (m *UpdateNamespaceRequest) Reset()                    { *m = UpdateNamespaceRequest{} }
+func (*UpdateNamespaceRequest) ProtoMessage()               {}
+func (*UpdateNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{7} }
+
+type UpdateNamespaceResponse struct {
+	Namespace Namespace `protobuf:"bytes,1,opt,name=namespace" json:"namespace"`
+}
+
+func (m *UpdateNamespaceResponse) Reset()                    { *m = UpdateNamespaceResponse{} }
+func (*UpdateNamespaceResponse) ProtoMessage()               {}
+func (*UpdateNamespaceResponse) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{8} }
+
+type DeleteNamespaceRequest struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+}
+
+func (m *DeleteNamespaceRequest) Reset()                    { *m = DeleteNamespaceRequest{} }
+func (*DeleteNamespaceRequest) ProtoMessage()               {}
+func (*DeleteNamespaceRequest) Descriptor() ([]byte, []int) { return fileDescriptorNamespace, []int{9} }
+
+func init() {
+	proto.RegisterType((*Namespace)(nil), "containerd.services.namespaces.v1.Namespace")
+	proto.RegisterType((*GetNamespaceRequest)(nil), "containerd.services.namespaces.v1.GetNamespaceRequest")
+	proto.RegisterType((*GetNamespaceResponse)(nil), "containerd.services.namespaces.v1.GetNamespaceResponse")
+	proto.RegisterType((*ListNamespacesRequest)(nil), "containerd.services.namespaces.v1.ListNamespacesRequest")
+	proto.RegisterType((*ListNamespacesResponse)(nil), "containerd.services.namespaces.v1.ListNamespacesResponse")
+	proto.RegisterType((*CreateNamespaceRequest)(nil), "containerd.services.namespaces.v1.CreateNamespaceRequest")
+	proto.RegisterType((*CreateNamespaceResponse)(nil), "containerd.services.namespaces.v1.CreateNamespaceResponse")
+	proto.RegisterType((*UpdateNamespaceRequest)(nil), "containerd.services.namespaces.v1.UpdateNamespaceRequest")
+	proto.RegisterType((*UpdateNamespaceResponse)(nil), "containerd.services.namespaces.v1.UpdateNamespaceResponse")
+	proto.RegisterType((*DeleteNamespaceRequest)(nil), "containerd.services.namespaces.v1.DeleteNamespaceRequest")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Namespaces service
+
+type NamespacesClient interface {
+	Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error)
+	List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error)
+	Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error)
+	Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error)
+	Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+}
+
+type namespacesClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewNamespacesClient(cc *grpc.ClientConn) NamespacesClient {
+	return &namespacesClient{cc}
+}
+
+func (c *namespacesClient) Get(ctx context.Context, in *GetNamespaceRequest, opts ...grpc.CallOption) (*GetNamespaceResponse, error) {
+	out := new(GetNamespaceResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Get", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *namespacesClient) List(ctx context.Context, in *ListNamespacesRequest, opts ...grpc.CallOption) (*ListNamespacesResponse, error) {
+	out := new(ListNamespacesResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/List", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *namespacesClient) Create(ctx context.Context, in *CreateNamespaceRequest, opts ...grpc.CallOption) (*CreateNamespaceResponse, error) {
+	out := new(CreateNamespaceResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Create", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *namespacesClient) Update(ctx context.Context, in *UpdateNamespaceRequest, opts ...grpc.CallOption) (*UpdateNamespaceResponse, error) {
+	out := new(UpdateNamespaceResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Update", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *namespacesClient) Delete(ctx context.Context, in *DeleteNamespaceRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
+	out := new(google_protobuf1.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.namespaces.v1.Namespaces/Delete", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for Namespaces service
+
+type NamespacesServer interface {
+	Get(context.Context, *GetNamespaceRequest) (*GetNamespaceResponse, error)
+	List(context.Context, *ListNamespacesRequest) (*ListNamespacesResponse, error)
+	Create(context.Context, *CreateNamespaceRequest) (*CreateNamespaceResponse, error)
+	Update(context.Context, *UpdateNamespaceRequest) (*UpdateNamespaceResponse, error)
+	Delete(context.Context, *DeleteNamespaceRequest) (*google_protobuf1.Empty, error)
+}
+
+func RegisterNamespacesServer(s *grpc.Server, srv NamespacesServer) {
+	s.RegisterService(&_Namespaces_serviceDesc, srv)
+}
+
+func _Namespaces_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetNamespaceRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NamespacesServer).Get(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.namespaces.v1.Namespaces/Get",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NamespacesServer).Get(ctx, req.(*GetNamespaceRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Namespaces_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListNamespacesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NamespacesServer).List(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.namespaces.v1.Namespaces/List",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NamespacesServer).List(ctx, req.(*ListNamespacesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Namespaces_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateNamespaceRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NamespacesServer).Create(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.namespaces.v1.Namespaces/Create",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NamespacesServer).Create(ctx, req.(*CreateNamespaceRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Namespaces_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateNamespaceRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NamespacesServer).Update(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.namespaces.v1.Namespaces/Update",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NamespacesServer).Update(ctx, req.(*UpdateNamespaceRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Namespaces_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteNamespaceRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(NamespacesServer).Delete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.namespaces.v1.Namespaces/Delete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(NamespacesServer).Delete(ctx, req.(*DeleteNamespaceRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Namespaces_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "containerd.services.namespaces.v1.Namespaces",
+	HandlerType: (*NamespacesServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Get",
+			Handler:    _Namespaces_Get_Handler,
+		},
+		{
+			MethodName: "List",
+			Handler:    _Namespaces_List_Handler,
+		},
+		{
+			MethodName: "Create",
+			Handler:    _Namespaces_Create_Handler,
+		},
+		{
+			MethodName: "Update",
+			Handler:    _Namespaces_Update_Handler,
+		},
+		{
+			MethodName: "Delete",
+			Handler:    _Namespaces_Delete_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto",
+}
+
+func (m *Namespace) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Namespace) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x12
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
+			i = encodeVarintNamespace(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintNamespace(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintNamespace(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	return i, nil
+}
+
+func (m *GetNamespaceRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GetNamespaceRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	return i, nil
+}
+
+func (m *GetNamespaceResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GetNamespaceResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintNamespace(dAtA, i, uint64(m.Namespace.Size()))
+	n1, err := m.Namespace.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n1
+	return i, nil
+}
+
+func (m *ListNamespacesRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListNamespacesRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Filter) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintNamespace(dAtA, i, uint64(len(m.Filter)))
+		i += copy(dAtA[i:], m.Filter)
+	}
+	return i, nil
+}
+
+func (m *ListNamespacesResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListNamespacesResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Namespaces) > 0 {
+		for _, msg := range m.Namespaces {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintNamespace(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *CreateNamespaceRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateNamespaceRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintNamespace(dAtA, i, uint64(m.Namespace.Size()))
+	n2, err := m.Namespace.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n2
+	return i, nil
+}
+
+func (m *CreateNamespaceResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateNamespaceResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintNamespace(dAtA, i, uint64(m.Namespace.Size()))
+	n3, err := m.Namespace.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n3
+	return i, nil
+}
+
+func (m *UpdateNamespaceRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateNamespaceRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintNamespace(dAtA, i, uint64(m.Namespace.Size()))
+	n4, err := m.Namespace.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n4
+	if m.UpdateMask != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintNamespace(dAtA, i, uint64(m.UpdateMask.Size()))
+		n5, err := m.UpdateMask.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n5
+	}
+	return i, nil
+}
+
+func (m *UpdateNamespaceResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateNamespaceResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintNamespace(dAtA, i, uint64(m.Namespace.Size()))
+	n6, err := m.Namespace.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n6
+	return i, nil
+}
+
+func (m *DeleteNamespaceRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteNamespaceRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintNamespace(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	return i, nil
+}
+
+func encodeFixed64Namespace(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Namespace(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintNamespace(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Namespace) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovNamespace(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovNamespace(uint64(len(k))) + 1 + len(v) + sovNamespace(uint64(len(v)))
+			n += mapEntrySize + 1 + sovNamespace(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *GetNamespaceRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovNamespace(uint64(l))
+	}
+	return n
+}
+
+func (m *GetNamespaceResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Namespace.Size()
+	n += 1 + l + sovNamespace(uint64(l))
+	return n
+}
+
+func (m *ListNamespacesRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Filter)
+	if l > 0 {
+		n += 1 + l + sovNamespace(uint64(l))
+	}
+	return n
+}
+
+func (m *ListNamespacesResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Namespaces) > 0 {
+		for _, e := range m.Namespaces {
+			l = e.Size()
+			n += 1 + l + sovNamespace(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *CreateNamespaceRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Namespace.Size()
+	n += 1 + l + sovNamespace(uint64(l))
+	return n
+}
+
+func (m *CreateNamespaceResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Namespace.Size()
+	n += 1 + l + sovNamespace(uint64(l))
+	return n
+}
+
+func (m *UpdateNamespaceRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Namespace.Size()
+	n += 1 + l + sovNamespace(uint64(l))
+	if m.UpdateMask != nil {
+		l = m.UpdateMask.Size()
+		n += 1 + l + sovNamespace(uint64(l))
+	}
+	return n
+}
+
+func (m *UpdateNamespaceResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Namespace.Size()
+	n += 1 + l + sovNamespace(uint64(l))
+	return n
+}
+
+func (m *DeleteNamespaceRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovNamespace(uint64(l))
+	}
+	return n
+}
+
+func sovNamespace(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozNamespace(x uint64) (n int) {
+	return sovNamespace(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Namespace) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&Namespace{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GetNamespaceRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GetNamespaceRequest{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GetNamespaceResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GetNamespaceResponse{`,
+		`Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListNamespacesRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListNamespacesRequest{`,
+		`Filter:` + fmt.Sprintf("%v", this.Filter) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListNamespacesResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListNamespacesResponse{`,
+		`Namespaces:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Namespaces), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CreateNamespaceRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateNamespaceRequest{`,
+		`Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CreateNamespaceResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateNamespaceResponse{`,
+		`Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateNamespaceRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateNamespaceRequest{`,
+		`Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
+		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "google_protobuf2.FieldMask", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateNamespaceResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateNamespaceResponse{`,
+		`Namespace:` + strings.Replace(strings.Replace(this.Namespace.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeleteNamespaceRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeleteNamespaceRequest{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringNamespace(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Namespace) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Namespace: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowNamespace
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowNamespace
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthNamespace
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GetNamespaceRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GetNamespaceRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GetNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GetNamespaceResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GetNamespaceResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GetNamespaceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListNamespacesRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListNamespacesRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListNamespacesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Filter = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListNamespacesResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListNamespacesResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListNamespacesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Namespaces = append(m.Namespaces, Namespace{})
+			if err := m.Namespaces[len(m.Namespaces)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CreateNamespaceRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CreateNamespaceRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CreateNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CreateNamespaceResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CreateNamespaceResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CreateNamespaceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UpdateNamespaceRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UpdateNamespaceRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UpdateNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.UpdateMask == nil {
+				m.UpdateMask = &google_protobuf2.FieldMask{}
+			}
+			if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UpdateNamespaceResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UpdateNamespaceResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UpdateNamespaceResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Namespace.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteNamespaceRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteNamespaceRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteNamespaceRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipNamespace(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthNamespace
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipNamespace(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowNamespace
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowNamespace
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthNamespace
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowNamespace
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipNamespace(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthNamespace = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowNamespace   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto", fileDescriptorNamespace)
+}
+
+var fileDescriptorNamespace = []byte{
+	// 547 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0x4d, 0x6f, 0xd3, 0x40,
+	0x10, 0xcd, 0x26, 0xc1, 0x52, 0xc6, 0x17, 0xb4, 0x04, 0x13, 0x19, 0xc9, 0x04, 0x9f, 0x8a, 0x54,
+	0xad, 0xd5, 0x20, 0x41, 0x3f, 0x6e, 0x85, 0xb6, 0x07, 0x0a, 0x42, 0x96, 0xb8, 0xc0, 0x01, 0x9c,
+	0x64, 0xe3, 0x9a, 0x38, 0xb6, 0xf1, 0xae, 0x2d, 0x45, 0x1c, 0xe0, 0xdf, 0x70, 0xe1, 0x87, 0xe4,
+	0xc8, 0x91, 0x13, 0x6a, 0xf3, 0x4b, 0xd0, 0xae, 0x9d, 0x38, 0x34, 0x46, 0xb8, 0x81, 0x72, 0x9b,
+	0xb1, 0xf7, 0xcd, 0x7b, 0x3b, 0x7a, 0xcf, 0x86, 0x67, 0xae, 0xc7, 0xcf, 0x92, 0x3e, 0x19, 0x84,
+	0x13, 0x6b, 0x10, 0x06, 0xdc, 0xf1, 0x02, 0x1a, 0x0f, 0x57, 0x4b, 0x27, 0xf2, 0x2c, 0x46, 0xe3,
+	0xd4, 0x1b, 0x50, 0x66, 0x05, 0xce, 0x84, 0xb2, 0xc8, 0x11, 0x65, 0xba, 0x53, 0x74, 0x24, 0x8a,
+	0x43, 0x1e, 0xe2, 0xfb, 0x05, 0x8c, 0x2c, 0x20, 0xa4, 0x80, 0x90, 0x74, 0x47, 0x6f, 0xbb, 0xa1,
+	0x1b, 0xca, 0xd3, 0x96, 0xa8, 0x32, 0xa0, 0x7e, 0xd7, 0x0d, 0x43, 0xd7, 0xa7, 0x96, 0xec, 0xfa,
+	0xc9, 0xc8, 0xa2, 0x93, 0x88, 0x4f, 0xf3, 0x97, 0xdd, 0xcb, 0x2f, 0x47, 0x1e, 0xf5, 0x87, 0x6f,
+	0x27, 0x0e, 0x1b, 0x67, 0x27, 0xcc, 0xaf, 0x08, 0x5a, 0x2f, 0x16, 0x34, 0x18, 0x43, 0x53, 0x70,
+	0x76, 0x50, 0x17, 0x6d, 0xb5, 0x6c, 0x59, 0xe3, 0x97, 0xa0, 0xf8, 0x4e, 0x9f, 0xfa, 0xac, 0x53,
+	0xef, 0x36, 0xb6, 0xd4, 0xde, 0x2e, 0xf9, 0xa3, 0x54, 0xb2, 0x9c, 0x48, 0x4e, 0x25, 0xf4, 0x28,
+	0xe0, 0xf1, 0xd4, 0xce, 0xe7, 0xe8, 0x7b, 0xa0, 0xae, 0x3c, 0xc6, 0x37, 0xa1, 0x31, 0xa6, 0xd3,
+	0x9c, 0x53, 0x94, 0xb8, 0x0d, 0x37, 0x52, 0xc7, 0x4f, 0x68, 0xa7, 0x2e, 0x9f, 0x65, 0xcd, 0x7e,
+	0x7d, 0x17, 0x99, 0x0f, 0xe0, 0xd6, 0x09, 0xe5, 0xcb, 0xf1, 0x36, 0xfd, 0x90, 0x50, 0xc6, 0xcb,
+	0x74, 0x9b, 0x67, 0xd0, 0xfe, 0xf5, 0x28, 0x8b, 0xc2, 0x80, 0x89, 0xfb, 0xb4, 0x96, 0x62, 0x25,
+	0x40, 0xed, 0x6d, 0x5f, 0xe5, 0x4a, 0x87, 0xcd, 0xd9, 0x8f, 0x7b, 0x35, 0xbb, 0x18, 0x62, 0x5a,
+	0x70, 0xfb, 0xd4, 0x63, 0x05, 0x15, 0x5b, 0xc8, 0xd2, 0x40, 0x19, 0x79, 0x3e, 0xa7, 0x71, 0x2e,
+	0x2c, 0xef, 0x4c, 0x1f, 0xb4, 0xcb, 0x80, 0x5c, 0x9c, 0x0d, 0x50, 0xd0, 0x76, 0x90, 0x5c, 0xf8,
+	0x26, 0xea, 0x56, 0xa6, 0x98, 0xef, 0x41, 0x7b, 0x12, 0x53, 0x87, 0xd3, 0xb5, 0xb5, 0xfd, 0xfb,
+	0x55, 0x8c, 0xe1, 0xce, 0x1a, 0xd7, 0xb5, 0xed, 0xfd, 0x0b, 0x02, 0xed, 0x55, 0x34, 0xfc, 0x2f,
+	0x37, 0xc3, 0x07, 0xa0, 0x26, 0x92, 0x4b, 0xa6, 0x47, 0x3a, 0x53, 0xed, 0xe9, 0x24, 0x0b, 0x18,
+	0x59, 0x04, 0x8c, 0x1c, 0x8b, 0x80, 0x3d, 0x77, 0xd8, 0xd8, 0x86, 0xec, 0xb8, 0xa8, 0xc5, 0x5a,
+	0xd6, 0x84, 0x5e, 0xdb, 0x5a, 0xb6, 0x41, 0x7b, 0x4a, 0x7d, 0x5a, 0xb2, 0x95, 0x92, 0x98, 0xf4,
+	0xce, 0x9b, 0x00, 0x85, 0x11, 0x71, 0x0a, 0x8d, 0x13, 0xca, 0xf1, 0xa3, 0x0a, 0x12, 0x4a, 0x82,
+	0xa8, 0x3f, 0xbe, 0x32, 0x2e, 0x5f, 0xc3, 0x47, 0x68, 0x8a, 0x48, 0xe0, 0x2a, 0x5f, 0x97, 0xd2,
+	0xb0, 0xe9, 0x7b, 0x1b, 0x20, 0x73, 0xf2, 0x4f, 0xa0, 0x64, 0xae, 0xc5, 0x55, 0x86, 0x94, 0x87,
+	0x49, 0xdf, 0xdf, 0x04, 0x5a, 0x08, 0xc8, 0xfc, 0x51, 0x49, 0x40, 0xb9, 0xe7, 0x2b, 0x09, 0xf8,
+	0x9d, 0x0b, 0xdf, 0x80, 0x92, 0x79, 0xa6, 0x92, 0x80, 0x72, 0x7b, 0xe9, 0xda, 0x5a, 0x1a, 0x8e,
+	0xc4, 0xbf, 0xe8, 0xf0, 0xdd, 0xec, 0xc2, 0xa8, 0x7d, 0xbf, 0x30, 0x6a, 0x9f, 0xe7, 0x06, 0x9a,
+	0xcd, 0x0d, 0xf4, 0x6d, 0x6e, 0xa0, 0xf3, 0xb9, 0x81, 0x5e, 0x1f, 0xff, 0xc5, 0x2f, 0xf4, 0xa0,
+	0xe8, 0xfa, 0x8a, 0x64, 0x7c, 0xf8, 0x33, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xe8, 0x4d, 0xe1, 0x93,
+	0x07, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
new file mode 100644
index 0000000..d58a8c2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/namespaces/v1/namespace.proto
@@ -0,0 +1,92 @@
+syntax = "proto3";
+
+package containerd.services.namespaces.v1;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/namespaces/v1;namespaces";
+
+// Namespaces provides the ability to manipulate containerd namespaces.
+//
+// All objects in the system are required to be a member of a namespace. If a
+// namespace is deleted, all objects, including containers, images and
+// snapshots, will be deleted, as well.
+//
+// Unless otherwise noted, operations in containerd apply only to the namespace
+// supplied per request.
+//
+// I hope this goes without saying, but namespaces are themselves NOT
+// namespaced.
+service Namespaces {
+	rpc Get(GetNamespaceRequest) returns (GetNamespaceResponse);
+	rpc List(ListNamespacesRequest) returns (ListNamespacesResponse);
+	rpc Create(CreateNamespaceRequest) returns (CreateNamespaceResponse);
+	rpc Update(UpdateNamespaceRequest) returns (UpdateNamespaceResponse);
+	rpc Delete(DeleteNamespaceRequest) returns (google.protobuf.Empty);
+}
+
+message Namespace {
+	string name = 1;
+
+	// Labels provides an area to include arbitrary data on namespaces.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	//
+	// Note that to add a new value to this field, read the existing set and
+	// include the entire result in the update call.
+	map<string, string> labels  = 2;
+}
+
+message GetNamespaceRequest {
+	string name = 1;
+}
+
+message GetNamespaceResponse {
+	Namespace namespace = 1 [(gogoproto.nullable) = false];
+}
+
+message ListNamespacesRequest {
+	string filter = 1;
+}
+
+message ListNamespacesResponse {
+	repeated Namespace namespaces = 1 [(gogoproto.nullable) = false];
+}
+
+message CreateNamespaceRequest {
+	Namespace namespace = 1 [(gogoproto.nullable) = false];
+}
+
+message CreateNamespaceResponse {
+	Namespace namespace = 1 [(gogoproto.nullable) = false];
+}
+
+// UpdateNamespaceRequest updates the metadata for a namespace.
+//
+// The operation should follow semantics described in
+// https://developers.google.com/protocol-buffers/docs/reference/csharp/class/google/protobuf/well-known-types/field-mask,
+// unless otherwise qualified.
+message UpdateNamespaceRequest {
+	// Namespace provides the target value, as declared by the mask, for the update.
+	//
+	// The namespace field must be set.
+	Namespace namespace = 1 [(gogoproto.nullable) = false];
+
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	//
+	// For the most part, this applies only to selectively updating labels on
+	// the namespace. While field masks are typically limited to ascii alphas
+	// and digits, we just take everything after the "labels." as the map key.
+	google.protobuf.FieldMask update_mask = 2;
+}
+
+message UpdateNamespaceResponse {
+	Namespace namespace = 1 [(gogoproto.nullable) = false];
+}
+
+message DeleteNamespaceRequest {
+	string name = 1;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go b/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go
new file mode 100644
index 0000000..ae3c792
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go
@@ -0,0 +1,4275 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto
+// DO NOT EDIT!
+
+/*
+	Package snapshot is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto
+
+	It has these top-level messages:
+		PrepareSnapshotRequest
+		PrepareSnapshotResponse
+		ViewSnapshotRequest
+		ViewSnapshotResponse
+		MountsRequest
+		MountsResponse
+		RemoveSnapshotRequest
+		CommitSnapshotRequest
+		StatSnapshotRequest
+		Info
+		StatSnapshotResponse
+		UpdateSnapshotRequest
+		UpdateSnapshotResponse
+		ListSnapshotsRequest
+		ListSnapshotsResponse
+		UsageRequest
+		UsageResponse
+*/
+package snapshot
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf2 "github.com/gogo/protobuf/types"
+import _ "github.com/gogo/protobuf/types"
+import containerd_types "github.com/containerd/containerd/api/types"
+
+import time "time"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type Kind int32
+
+const (
+	KindUnknown   Kind = 0
+	KindView      Kind = 1
+	KindActive    Kind = 2
+	KindCommitted Kind = 3
+)
+
+var Kind_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "VIEW",
+	2: "ACTIVE",
+	3: "COMMITTED",
+}
+var Kind_value = map[string]int32{
+	"UNKNOWN":   0,
+	"VIEW":      1,
+	"ACTIVE":    2,
+	"COMMITTED": 3,
+}
+
+func (x Kind) String() string {
+	return proto.EnumName(Kind_name, int32(x))
+}
+func (Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{0} }
+
+type PrepareSnapshotRequest struct {
+	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Key         string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	Parent      string `protobuf:"bytes,3,opt,name=parent,proto3" json:"parent,omitempty"`
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	Labels map[string]string `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *PrepareSnapshotRequest) Reset()                    { *m = PrepareSnapshotRequest{} }
+func (*PrepareSnapshotRequest) ProtoMessage()               {}
+func (*PrepareSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{0} }
+
+type PrepareSnapshotResponse struct {
+	Mounts []*containerd_types.Mount `protobuf:"bytes,1,rep,name=mounts" json:"mounts,omitempty"`
+}
+
+func (m *PrepareSnapshotResponse) Reset()                    { *m = PrepareSnapshotResponse{} }
+func (*PrepareSnapshotResponse) ProtoMessage()               {}
+func (*PrepareSnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{1} }
+
+type ViewSnapshotRequest struct {
+	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Key         string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	Parent      string `protobuf:"bytes,3,opt,name=parent,proto3" json:"parent,omitempty"`
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	Labels map[string]string `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *ViewSnapshotRequest) Reset()                    { *m = ViewSnapshotRequest{} }
+func (*ViewSnapshotRequest) ProtoMessage()               {}
+func (*ViewSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{2} }
+
+type ViewSnapshotResponse struct {
+	Mounts []*containerd_types.Mount `protobuf:"bytes,1,rep,name=mounts" json:"mounts,omitempty"`
+}
+
+func (m *ViewSnapshotResponse) Reset()                    { *m = ViewSnapshotResponse{} }
+func (*ViewSnapshotResponse) ProtoMessage()               {}
+func (*ViewSnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{3} }
+
+type MountsRequest struct {
+	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Key         string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (m *MountsRequest) Reset()                    { *m = MountsRequest{} }
+func (*MountsRequest) ProtoMessage()               {}
+func (*MountsRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{4} }
+
+type MountsResponse struct {
+	Mounts []*containerd_types.Mount `protobuf:"bytes,1,rep,name=mounts" json:"mounts,omitempty"`
+}
+
+func (m *MountsResponse) Reset()                    { *m = MountsResponse{} }
+func (*MountsResponse) ProtoMessage()               {}
+func (*MountsResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{5} }
+
+type RemoveSnapshotRequest struct {
+	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Key         string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (m *RemoveSnapshotRequest) Reset()                    { *m = RemoveSnapshotRequest{} }
+func (*RemoveSnapshotRequest) ProtoMessage()               {}
+func (*RemoveSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{6} }
+
+type CommitSnapshotRequest struct {
+	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Name        string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	Key         string `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	Labels map[string]string `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *CommitSnapshotRequest) Reset()                    { *m = CommitSnapshotRequest{} }
+func (*CommitSnapshotRequest) ProtoMessage()               {}
+func (*CommitSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{7} }
+
+type StatSnapshotRequest struct {
+	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Key         string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (m *StatSnapshotRequest) Reset()                    { *m = StatSnapshotRequest{} }
+func (*StatSnapshotRequest) ProtoMessage()               {}
+func (*StatSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{8} }
+
+type Info struct {
+	Name   string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"`
+	Kind   Kind   `protobuf:"varint,3,opt,name=kind,proto3,enum=containerd.services.snapshots.v1.Kind" json:"kind,omitempty"`
+	// CreatedAt provides the time at which the snapshot was created.
+	CreatedAt time.Time `protobuf:"bytes,4,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
+	// UpdatedAt provides the time the info was last updated.
+	UpdatedAt time.Time `protobuf:"bytes,5,opt,name=updated_at,json=updatedAt,stdtime" json:"updated_at"`
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	Labels map[string]string `protobuf:"bytes,6,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (m *Info) Reset()                    { *m = Info{} }
+func (*Info) ProtoMessage()               {}
+func (*Info) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{9} }
+
+type StatSnapshotResponse struct {
+	Info Info `protobuf:"bytes,1,opt,name=info" json:"info"`
+}
+
+func (m *StatSnapshotResponse) Reset()                    { *m = StatSnapshotResponse{} }
+func (*StatSnapshotResponse) ProtoMessage()               {}
+func (*StatSnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{10} }
+
+type UpdateSnapshotRequest struct {
+	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Info        Info   `protobuf:"bytes,2,opt,name=info" json:"info"`
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	//
+	// In info, Name, Parent, Kind, Created are immutable,
+	// other field may be updated using this mask.
+	// If no mask is provided, all mutable field are updated.
+	UpdateMask *google_protobuf2.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"`
+}
+
+func (m *UpdateSnapshotRequest) Reset()                    { *m = UpdateSnapshotRequest{} }
+func (*UpdateSnapshotRequest) ProtoMessage()               {}
+func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{11} }
+
+type UpdateSnapshotResponse struct {
+	Info Info `protobuf:"bytes,1,opt,name=info" json:"info"`
+}
+
+func (m *UpdateSnapshotResponse) Reset()                    { *m = UpdateSnapshotResponse{} }
+func (*UpdateSnapshotResponse) ProtoMessage()               {}
+func (*UpdateSnapshotResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{12} }
+
+type ListSnapshotsRequest struct {
+	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+}
+
+func (m *ListSnapshotsRequest) Reset()                    { *m = ListSnapshotsRequest{} }
+func (*ListSnapshotsRequest) ProtoMessage()               {}
+func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{13} }
+
+type ListSnapshotsResponse struct {
+	Info []Info `protobuf:"bytes,1,rep,name=info" json:"info"`
+}
+
+func (m *ListSnapshotsResponse) Reset()                    { *m = ListSnapshotsResponse{} }
+func (*ListSnapshotsResponse) ProtoMessage()               {}
+func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{14} }
+
+type UsageRequest struct {
+	Snapshotter string `protobuf:"bytes,1,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
+	Key         string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+}
+
+func (m *UsageRequest) Reset()                    { *m = UsageRequest{} }
+func (*UsageRequest) ProtoMessage()               {}
+func (*UsageRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{15} }
+
+type UsageResponse struct {
+	Size_  int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"`
+	Inodes int64 `protobuf:"varint,2,opt,name=inodes,proto3" json:"inodes,omitempty"`
+}
+
+func (m *UsageResponse) Reset()                    { *m = UsageResponse{} }
+func (*UsageResponse) ProtoMessage()               {}
+func (*UsageResponse) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{16} }
+
+func init() {
+	proto.RegisterType((*PrepareSnapshotRequest)(nil), "containerd.services.snapshots.v1.PrepareSnapshotRequest")
+	proto.RegisterType((*PrepareSnapshotResponse)(nil), "containerd.services.snapshots.v1.PrepareSnapshotResponse")
+	proto.RegisterType((*ViewSnapshotRequest)(nil), "containerd.services.snapshots.v1.ViewSnapshotRequest")
+	proto.RegisterType((*ViewSnapshotResponse)(nil), "containerd.services.snapshots.v1.ViewSnapshotResponse")
+	proto.RegisterType((*MountsRequest)(nil), "containerd.services.snapshots.v1.MountsRequest")
+	proto.RegisterType((*MountsResponse)(nil), "containerd.services.snapshots.v1.MountsResponse")
+	proto.RegisterType((*RemoveSnapshotRequest)(nil), "containerd.services.snapshots.v1.RemoveSnapshotRequest")
+	proto.RegisterType((*CommitSnapshotRequest)(nil), "containerd.services.snapshots.v1.CommitSnapshotRequest")
+	proto.RegisterType((*StatSnapshotRequest)(nil), "containerd.services.snapshots.v1.StatSnapshotRequest")
+	proto.RegisterType((*Info)(nil), "containerd.services.snapshots.v1.Info")
+	proto.RegisterType((*StatSnapshotResponse)(nil), "containerd.services.snapshots.v1.StatSnapshotResponse")
+	proto.RegisterType((*UpdateSnapshotRequest)(nil), "containerd.services.snapshots.v1.UpdateSnapshotRequest")
+	proto.RegisterType((*UpdateSnapshotResponse)(nil), "containerd.services.snapshots.v1.UpdateSnapshotResponse")
+	proto.RegisterType((*ListSnapshotsRequest)(nil), "containerd.services.snapshots.v1.ListSnapshotsRequest")
+	proto.RegisterType((*ListSnapshotsResponse)(nil), "containerd.services.snapshots.v1.ListSnapshotsResponse")
+	proto.RegisterType((*UsageRequest)(nil), "containerd.services.snapshots.v1.UsageRequest")
+	proto.RegisterType((*UsageResponse)(nil), "containerd.services.snapshots.v1.UsageResponse")
+	proto.RegisterEnum("containerd.services.snapshots.v1.Kind", Kind_name, Kind_value)
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Snapshots service
+
+type SnapshotsClient interface {
+	Prepare(ctx context.Context, in *PrepareSnapshotRequest, opts ...grpc.CallOption) (*PrepareSnapshotResponse, error)
+	View(ctx context.Context, in *ViewSnapshotRequest, opts ...grpc.CallOption) (*ViewSnapshotResponse, error)
+	Mounts(ctx context.Context, in *MountsRequest, opts ...grpc.CallOption) (*MountsResponse, error)
+	Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	Stat(ctx context.Context, in *StatSnapshotRequest, opts ...grpc.CallOption) (*StatSnapshotResponse, error)
+	Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*UpdateSnapshotResponse, error)
+	List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (Snapshots_ListClient, error)
+	Usage(ctx context.Context, in *UsageRequest, opts ...grpc.CallOption) (*UsageResponse, error)
+}
+
+type snapshotsClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewSnapshotsClient(cc *grpc.ClientConn) SnapshotsClient {
+	return &snapshotsClient{cc}
+}
+
+func (c *snapshotsClient) Prepare(ctx context.Context, in *PrepareSnapshotRequest, opts ...grpc.CallOption) (*PrepareSnapshotResponse, error) {
+	out := new(PrepareSnapshotResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Prepare", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *snapshotsClient) View(ctx context.Context, in *ViewSnapshotRequest, opts ...grpc.CallOption) (*ViewSnapshotResponse, error) {
+	out := new(ViewSnapshotResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/View", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *snapshotsClient) Mounts(ctx context.Context, in *MountsRequest, opts ...grpc.CallOption) (*MountsResponse, error) {
+	out := new(MountsResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Mounts", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *snapshotsClient) Commit(ctx context.Context, in *CommitSnapshotRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
+	out := new(google_protobuf1.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Commit", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *snapshotsClient) Remove(ctx context.Context, in *RemoveSnapshotRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
+	out := new(google_protobuf1.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Remove", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *snapshotsClient) Stat(ctx context.Context, in *StatSnapshotRequest, opts ...grpc.CallOption) (*StatSnapshotResponse, error) {
+	out := new(StatSnapshotResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Stat", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *snapshotsClient) Update(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*UpdateSnapshotResponse, error) {
+	out := new(UpdateSnapshotResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Update", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *snapshotsClient) List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (Snapshots_ListClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_Snapshots_serviceDesc.Streams[0], c.cc, "/containerd.services.snapshots.v1.Snapshots/List", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &snapshotsListClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Snapshots_ListClient interface {
+	Recv() (*ListSnapshotsResponse, error)
+	grpc.ClientStream
+}
+
+type snapshotsListClient struct {
+	grpc.ClientStream
+}
+
+func (x *snapshotsListClient) Recv() (*ListSnapshotsResponse, error) {
+	m := new(ListSnapshotsResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *snapshotsClient) Usage(ctx context.Context, in *UsageRequest, opts ...grpc.CallOption) (*UsageResponse, error) {
+	out := new(UsageResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.snapshots.v1.Snapshots/Usage", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for Snapshots service
+
+type SnapshotsServer interface {
+	Prepare(context.Context, *PrepareSnapshotRequest) (*PrepareSnapshotResponse, error)
+	View(context.Context, *ViewSnapshotRequest) (*ViewSnapshotResponse, error)
+	Mounts(context.Context, *MountsRequest) (*MountsResponse, error)
+	Commit(context.Context, *CommitSnapshotRequest) (*google_protobuf1.Empty, error)
+	Remove(context.Context, *RemoveSnapshotRequest) (*google_protobuf1.Empty, error)
+	Stat(context.Context, *StatSnapshotRequest) (*StatSnapshotResponse, error)
+	Update(context.Context, *UpdateSnapshotRequest) (*UpdateSnapshotResponse, error)
+	List(*ListSnapshotsRequest, Snapshots_ListServer) error
+	Usage(context.Context, *UsageRequest) (*UsageResponse, error)
+}
+
+func RegisterSnapshotsServer(s *grpc.Server, srv SnapshotsServer) {
+	s.RegisterService(&_Snapshots_serviceDesc, srv)
+}
+
+func _Snapshots_Prepare_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PrepareSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SnapshotsServer).Prepare(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.snapshots.v1.Snapshots/Prepare",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SnapshotsServer).Prepare(ctx, req.(*PrepareSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Snapshots_View_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ViewSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SnapshotsServer).View(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.snapshots.v1.Snapshots/View",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SnapshotsServer).View(ctx, req.(*ViewSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Snapshots_Mounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MountsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SnapshotsServer).Mounts(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.snapshots.v1.Snapshots/Mounts",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SnapshotsServer).Mounts(ctx, req.(*MountsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Snapshots_Commit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CommitSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SnapshotsServer).Commit(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.snapshots.v1.Snapshots/Commit",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SnapshotsServer).Commit(ctx, req.(*CommitSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Snapshots_Remove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(RemoveSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SnapshotsServer).Remove(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.snapshots.v1.Snapshots/Remove",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SnapshotsServer).Remove(ctx, req.(*RemoveSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Snapshots_Stat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(StatSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SnapshotsServer).Stat(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.snapshots.v1.Snapshots/Stat",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SnapshotsServer).Stat(ctx, req.(*StatSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Snapshots_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SnapshotsServer).Update(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.snapshots.v1.Snapshots/Update",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SnapshotsServer).Update(ctx, req.(*UpdateSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Snapshots_List_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(ListSnapshotsRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(SnapshotsServer).List(m, &snapshotsListServer{stream})
+}
+
+type Snapshots_ListServer interface {
+	Send(*ListSnapshotsResponse) error
+	grpc.ServerStream
+}
+
+type snapshotsListServer struct {
+	grpc.ServerStream
+}
+
+func (x *snapshotsListServer) Send(m *ListSnapshotsResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _Snapshots_Usage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UsageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SnapshotsServer).Usage(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.snapshots.v1.Snapshots/Usage",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SnapshotsServer).Usage(ctx, req.(*UsageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Snapshots_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "containerd.services.snapshots.v1.Snapshots",
+	HandlerType: (*SnapshotsServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Prepare",
+			Handler:    _Snapshots_Prepare_Handler,
+		},
+		{
+			MethodName: "View",
+			Handler:    _Snapshots_View_Handler,
+		},
+		{
+			MethodName: "Mounts",
+			Handler:    _Snapshots_Mounts_Handler,
+		},
+		{
+			MethodName: "Commit",
+			Handler:    _Snapshots_Commit_Handler,
+		},
+		{
+			MethodName: "Remove",
+			Handler:    _Snapshots_Remove_Handler,
+		},
+		{
+			MethodName: "Stat",
+			Handler:    _Snapshots_Stat_Handler,
+		},
+		{
+			MethodName: "Update",
+			Handler:    _Snapshots_Update_Handler,
+		},
+		{
+			MethodName: "Usage",
+			Handler:    _Snapshots_Usage_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "List",
+			Handler:       _Snapshots_List_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto",
+}
+
+func (m *PrepareSnapshotRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PrepareSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Snapshotter) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+		i += copy(dAtA[i:], m.Snapshotter)
+	}
+	if len(m.Key) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
+	}
+	if len(m.Parent) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent)))
+		i += copy(dAtA[i:], m.Parent)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x22
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v)))
+			i = encodeVarintSnapshots(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintSnapshots(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintSnapshots(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	return i, nil
+}
+
+func (m *PrepareSnapshotResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PrepareSnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Mounts) > 0 {
+		for _, msg := range m.Mounts {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintSnapshots(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *ViewSnapshotRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ViewSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Snapshotter) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+		i += copy(dAtA[i:], m.Snapshotter)
+	}
+	if len(m.Key) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
+	}
+	if len(m.Parent) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent)))
+		i += copy(dAtA[i:], m.Parent)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x22
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v)))
+			i = encodeVarintSnapshots(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintSnapshots(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintSnapshots(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	return i, nil
+}
+
+func (m *ViewSnapshotResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ViewSnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Mounts) > 0 {
+		for _, msg := range m.Mounts {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintSnapshots(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *MountsRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MountsRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Snapshotter) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+		i += copy(dAtA[i:], m.Snapshotter)
+	}
+	if len(m.Key) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
+	}
+	return i, nil
+}
+
+func (m *MountsResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MountsResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Mounts) > 0 {
+		for _, msg := range m.Mounts {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintSnapshots(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *RemoveSnapshotRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RemoveSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Snapshotter) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+		i += copy(dAtA[i:], m.Snapshotter)
+	}
+	if len(m.Key) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
+	}
+	return i, nil
+}
+
+func (m *CommitSnapshotRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CommitSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Snapshotter) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+		i += copy(dAtA[i:], m.Snapshotter)
+	}
+	if len(m.Name) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	if len(m.Key) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
+	}
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x22
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v)))
+			i = encodeVarintSnapshots(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintSnapshots(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintSnapshots(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	return i, nil
+}
+
+func (m *StatSnapshotRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Snapshotter) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+		i += copy(dAtA[i:], m.Snapshotter)
+	}
+	if len(m.Key) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
+	}
+	return i, nil
+}
+
+func (m *Info) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Info) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Name) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Name)))
+		i += copy(dAtA[i:], m.Name)
+	}
+	if len(m.Parent) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Parent)))
+		i += copy(dAtA[i:], m.Parent)
+	}
+	if m.Kind != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(m.Kind))
+	}
+	dAtA[i] = 0x22
+	i++
+	i = encodeVarintSnapshots(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
+	n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n1
+	dAtA[i] = 0x2a
+	i++
+	i = encodeVarintSnapshots(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)))
+	n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.UpdatedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n2
+	if len(m.Labels) > 0 {
+		for k, _ := range m.Labels {
+			dAtA[i] = 0x32
+			i++
+			v := m.Labels[k]
+			mapSize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v)))
+			i = encodeVarintSnapshots(dAtA, i, uint64(mapSize))
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintSnapshots(dAtA, i, uint64(len(k)))
+			i += copy(dAtA[i:], k)
+			dAtA[i] = 0x12
+			i++
+			i = encodeVarintSnapshots(dAtA, i, uint64(len(v)))
+			i += copy(dAtA[i:], v)
+		}
+	}
+	return i, nil
+}
+
+func (m *StatSnapshotResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatSnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintSnapshots(dAtA, i, uint64(m.Info.Size()))
+	n3, err := m.Info.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n3
+	return i, nil
+}
+
+func (m *UpdateSnapshotRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateSnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Snapshotter) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+		i += copy(dAtA[i:], m.Snapshotter)
+	}
+	dAtA[i] = 0x12
+	i++
+	i = encodeVarintSnapshots(dAtA, i, uint64(m.Info.Size()))
+	n4, err := m.Info.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n4
+	if m.UpdateMask != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(m.UpdateMask.Size()))
+		n5, err := m.UpdateMask.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n5
+	}
+	return i, nil
+}
+
+func (m *UpdateSnapshotResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateSnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintSnapshots(dAtA, i, uint64(m.Info.Size()))
+	n6, err := m.Info.MarshalTo(dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n6
+	return i, nil
+}
+
+func (m *ListSnapshotsRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListSnapshotsRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Snapshotter) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+		i += copy(dAtA[i:], m.Snapshotter)
+	}
+	return i, nil
+}
+
+func (m *ListSnapshotsResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListSnapshotsResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Info) > 0 {
+		for _, msg := range m.Info {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintSnapshots(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *UsageRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UsageRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Snapshotter) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Snapshotter)))
+		i += copy(dAtA[i:], m.Snapshotter)
+	}
+	if len(m.Key) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
+	}
+	return i, nil
+}
+
+func (m *UsageResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UsageResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Size_ != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(m.Size_))
+	}
+	if m.Inodes != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintSnapshots(dAtA, i, uint64(m.Inodes))
+	}
+	return i, nil
+}
+
+func encodeFixed64Snapshots(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Snapshots(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintSnapshots(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *PrepareSnapshotRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Snapshotter)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	l = len(m.Parent)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v)))
+			n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *PrepareSnapshotResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Mounts) > 0 {
+		for _, e := range m.Mounts {
+			l = e.Size()
+			n += 1 + l + sovSnapshots(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *ViewSnapshotRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Snapshotter)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	l = len(m.Parent)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v)))
+			n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *ViewSnapshotResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Mounts) > 0 {
+		for _, e := range m.Mounts {
+			l = e.Size()
+			n += 1 + l + sovSnapshots(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *MountsRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Snapshotter)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	return n
+}
+
+func (m *MountsResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Mounts) > 0 {
+		for _, e := range m.Mounts {
+			l = e.Size()
+			n += 1 + l + sovSnapshots(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *RemoveSnapshotRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Snapshotter)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	return n
+}
+
+func (m *CommitSnapshotRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Snapshotter)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v)))
+			n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *StatSnapshotRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Snapshotter)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	return n
+}
+
+func (m *Info) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	l = len(m.Parent)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	if m.Kind != 0 {
+		n += 1 + sovSnapshots(uint64(m.Kind))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
+	n += 1 + l + sovSnapshots(uint64(l))
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.UpdatedAt)
+	n += 1 + l + sovSnapshots(uint64(l))
+	if len(m.Labels) > 0 {
+		for k, v := range m.Labels {
+			_ = k
+			_ = v
+			mapEntrySize := 1 + len(k) + sovSnapshots(uint64(len(k))) + 1 + len(v) + sovSnapshots(uint64(len(v)))
+			n += mapEntrySize + 1 + sovSnapshots(uint64(mapEntrySize))
+		}
+	}
+	return n
+}
+
+func (m *StatSnapshotResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Info.Size()
+	n += 1 + l + sovSnapshots(uint64(l))
+	return n
+}
+
+func (m *UpdateSnapshotRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Snapshotter)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	l = m.Info.Size()
+	n += 1 + l + sovSnapshots(uint64(l))
+	if m.UpdateMask != nil {
+		l = m.UpdateMask.Size()
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	return n
+}
+
+func (m *UpdateSnapshotResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = m.Info.Size()
+	n += 1 + l + sovSnapshots(uint64(l))
+	return n
+}
+
+func (m *ListSnapshotsRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Snapshotter)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	return n
+}
+
+func (m *ListSnapshotsResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Info) > 0 {
+		for _, e := range m.Info {
+			l = e.Size()
+			n += 1 + l + sovSnapshots(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *UsageRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Snapshotter)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovSnapshots(uint64(l))
+	}
+	return n
+}
+
+func (m *UsageResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.Size_ != 0 {
+		n += 1 + sovSnapshots(uint64(m.Size_))
+	}
+	if m.Inodes != 0 {
+		n += 1 + sovSnapshots(uint64(m.Inodes))
+	}
+	return n
+}
+
+func sovSnapshots(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozSnapshots(x uint64) (n int) {
+	return sovSnapshots(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *PrepareSnapshotRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&PrepareSnapshotRequest{`,
+		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Parent:` + fmt.Sprintf("%v", this.Parent) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PrepareSnapshotResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PrepareSnapshotResponse{`,
+		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_types.Mount", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ViewSnapshotRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&ViewSnapshotRequest{`,
+		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Parent:` + fmt.Sprintf("%v", this.Parent) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ViewSnapshotResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ViewSnapshotResponse{`,
+		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_types.Mount", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MountsRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&MountsRequest{`,
+		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MountsResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&MountsResponse{`,
+		`Mounts:` + strings.Replace(fmt.Sprintf("%v", this.Mounts), "Mount", "containerd_types.Mount", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *RemoveSnapshotRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&RemoveSnapshotRequest{`,
+		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CommitSnapshotRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&CommitSnapshotRequest{`,
+		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StatSnapshotRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StatSnapshotRequest{`,
+		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Info) String() string {
+	if this == nil {
+		return "nil"
+	}
+	keysForLabels := make([]string, 0, len(this.Labels))
+	for k, _ := range this.Labels {
+		keysForLabels = append(keysForLabels, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForLabels)
+	mapStringForLabels := "map[string]string{"
+	for _, k := range keysForLabels {
+		mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k])
+	}
+	mapStringForLabels += "}"
+	s := strings.Join([]string{`&Info{`,
+		`Name:` + fmt.Sprintf("%v", this.Name) + `,`,
+		`Parent:` + fmt.Sprintf("%v", this.Parent) + `,`,
+		`Kind:` + fmt.Sprintf("%v", this.Kind) + `,`,
+		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`Labels:` + mapStringForLabels + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StatSnapshotResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StatSnapshotResponse{`,
+		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateSnapshotRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateSnapshotRequest{`,
+		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
+		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`UpdateMask:` + strings.Replace(fmt.Sprintf("%v", this.UpdateMask), "FieldMask", "google_protobuf2.FieldMask", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateSnapshotResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateSnapshotResponse{`,
+		`Info:` + strings.Replace(strings.Replace(this.Info.String(), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListSnapshotsRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListSnapshotsRequest{`,
+		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListSnapshotsResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListSnapshotsResponse{`,
+		`Info:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Info), "Info", "Info", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UsageRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UsageRequest{`,
+		`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
+		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UsageResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UsageResponse{`,
+		`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
+		`Inodes:` + fmt.Sprintf("%v", this.Inodes) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringSnapshots(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *PrepareSnapshotRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PrepareSnapshotRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PrepareSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Snapshotter = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Parent = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSnapshots
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSnapshots
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthSnapshots
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PrepareSnapshotResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PrepareSnapshotResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PrepareSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Mounts = append(m.Mounts, &containerd_types.Mount{})
+			if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ViewSnapshotRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ViewSnapshotRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ViewSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Snapshotter = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Parent = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSnapshots
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSnapshots
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthSnapshots
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ViewSnapshotResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ViewSnapshotResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ViewSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Mounts = append(m.Mounts, &containerd_types.Mount{})
+			if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MountsRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MountsRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MountsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Snapshotter = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MountsResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MountsResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MountsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Mounts = append(m.Mounts, &containerd_types.Mount{})
+			if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RemoveSnapshotRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RemoveSnapshotRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RemoveSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Snapshotter = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CommitSnapshotRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CommitSnapshotRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CommitSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Snapshotter = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSnapshots
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSnapshots
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthSnapshots
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StatSnapshotRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StatSnapshotRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StatSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Snapshotter = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Info) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Info: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Parent", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Parent = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType)
+			}
+			m.Kind = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Kind |= (Kind(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.UpdatedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			if m.Labels == nil {
+				m.Labels = make(map[string]string)
+			}
+			if iNdEx < postIndex {
+				var valuekey uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSnapshots
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					valuekey |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				var stringLenmapvalue uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowSnapshots
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					stringLenmapvalue |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				intStringLenmapvalue := int(stringLenmapvalue)
+				if intStringLenmapvalue < 0 {
+					return ErrInvalidLengthSnapshots
+				}
+				postStringIndexmapvalue := iNdEx + intStringLenmapvalue
+				if postStringIndexmapvalue > l {
+					return io.ErrUnexpectedEOF
+				}
+				mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
+				iNdEx = postStringIndexmapvalue
+				m.Labels[mapkey] = mapvalue
+			} else {
+				var mapvalue string
+				m.Labels[mapkey] = mapvalue
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StatSnapshotResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StatSnapshotResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StatSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UpdateSnapshotRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UpdateSnapshotRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UpdateSnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Snapshotter = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UpdateMask", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.UpdateMask == nil {
+				m.UpdateMask = &google_protobuf2.FieldMask{}
+			}
+			if err := m.UpdateMask.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UpdateSnapshotResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UpdateSnapshotResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UpdateSnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListSnapshotsRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListSnapshotsRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListSnapshotsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Snapshotter = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListSnapshotsResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListSnapshotsResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListSnapshotsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Info = append(m.Info, Info{})
+			if err := m.Info[len(m.Info)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UsageRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UsageRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UsageRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshotter", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Snapshotter = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UsageResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UsageResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UsageResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
+			}
+			m.Size_ = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Size_ |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Inodes", wireType)
+			}
+			m.Inodes = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Inodes |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipSnapshots(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthSnapshots
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipSnapshots(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowSnapshots
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowSnapshots
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthSnapshots
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowSnapshots
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipSnapshots(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthSnapshots = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowSnapshots   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto", fileDescriptorSnapshots)
+}
+
+var fileDescriptorSnapshots = []byte{
+	// 1004 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0x4f, 0x6f, 0x1a, 0x47,
+	0x14, 0xf7, 0xc0, 0x1a, 0xc7, 0x0f, 0xdb, 0xa5, 0x13, 0x4c, 0xd0, 0xb6, 0xc2, 0x2b, 0x0e, 0x95,
+	0xd5, 0xc3, 0x6e, 0x42, 0xd5, 0xc4, 0x89, 0x2f, 0x05, 0x42, 0x2b, 0xe2, 0xd8, 0xa9, 0x36, 0xb6,
+	0x53, 0xa7, 0x91, 0xa2, 0x35, 0x8c, 0xf1, 0x0a, 0x76, 0x97, 0x32, 0x03, 0x11, 0xad, 0x54, 0xf5,
+	0x18, 0xf9, 0xd4, 0x2f, 0xe0, 0x53, 0xfb, 0x21, 0xaa, 0x7e, 0x02, 0x1f, 0x7b, 0xec, 0xa9, 0x6d,
+	0xfc, 0x25, 0x7a, 0xea, 0x1f, 0xcd, 0xec, 0x2c, 0x60, 0x4c, 0xc5, 0xb2, 0xa1, 0xb7, 0xb7, 0x33,
+	0xf3, 0xde, 0xfb, 0xbd, 0xdf, 0x9b, 0xf7, 0xde, 0x2c, 0x54, 0x1b, 0x36, 0x3b, 0xed, 0x1e, 0xeb,
+	0x35, 0xcf, 0x31, 0x6a, 0x9e, 0xcb, 0x2c, 0xdb, 0x25, 0x9d, 0xfa, 0xa8, 0x68, 0xb5, 0x6d, 0x83,
+	0x92, 0x4e, 0xcf, 0xae, 0x11, 0x6a, 0x50, 0xd7, 0x6a, 0xd3, 0x53, 0x8f, 0x19, 0xbd, 0x3b, 0x03,
+	0x99, 0xea, 0xed, 0x8e, 0xc7, 0x3c, 0xac, 0x0d, 0x95, 0xf4, 0x40, 0x41, 0x1f, 0x1e, 0xea, 0xdd,
+	0x51, 0xd3, 0x0d, 0xaf, 0xe1, 0x89, 0xc3, 0x06, 0x97, 0x7c, 0x3d, 0xf5, 0xbd, 0x86, 0xe7, 0x35,
+	0x5a, 0xc4, 0x10, 0x5f, 0xc7, 0xdd, 0x13, 0x83, 0x38, 0x6d, 0xd6, 0x97, 0x9b, 0xda, 0xf8, 0xe6,
+	0x89, 0x4d, 0x5a, 0xf5, 0x97, 0x8e, 0x45, 0x9b, 0xf2, 0xc4, 0xc6, 0xf8, 0x09, 0x66, 0x3b, 0x84,
+	0x32, 0xcb, 0x69, 0xcb, 0x03, 0x77, 0x43, 0x85, 0xc8, 0xfa, 0x6d, 0x42, 0x0d, 0xc7, 0xeb, 0xba,
+	0xcc, 0xd7, 0xcb, 0xff, 0x85, 0x20, 0xf3, 0x79, 0x87, 0xb4, 0xad, 0x0e, 0x79, 0x2a, 0xa3, 0x30,
+	0xc9, 0x57, 0x5d, 0x42, 0x19, 0xd6, 0x20, 0x19, 0x04, 0xc6, 0x48, 0x27, 0x8b, 0x34, 0xb4, 0xb9,
+	0x6c, 0x8e, 0x2e, 0xe1, 0x14, 0xc4, 0x9b, 0xa4, 0x9f, 0x8d, 0x89, 0x1d, 0x2e, 0xe2, 0x0c, 0x24,
+	0xb8, 0x29, 0x97, 0x65, 0xe3, 0x62, 0x51, 0x7e, 0xe1, 0x17, 0x90, 0x68, 0x59, 0xc7, 0xa4, 0x45,
+	0xb3, 0x8a, 0x16, 0xdf, 0x4c, 0x16, 0x1e, 0xea, 0xd3, 0x78, 0xd4, 0x27, 0xa3, 0xd2, 0x1f, 0x0b,
+	0x33, 0x15, 0x97, 0x75, 0xfa, 0xa6, 0xb4, 0xa9, 0xde, 0x87, 0xe4, 0xc8, 0x72, 0x00, 0x0b, 0x0d,
+	0x61, 0xa5, 0x61, 0xb1, 0x67, 0xb5, 0xba, 0x44, 0x42, 0xf5, 0x3f, 0x1e, 0xc4, 0xb6, 0x50, 0xfe,
+	0x11, 0xdc, 0xba, 0xe6, 0x88, 0xb6, 0x3d, 0x97, 0x12, 0x6c, 0x40, 0x42, 0x30, 0x45, 0xb3, 0x48,
+	0x60, 0xbe, 0x35, 0x8a, 0x59, 0x30, 0xa9, 0xef, 0xf2, 0x7d, 0x53, 0x1e, 0xcb, 0xff, 0x89, 0xe0,
+	0xe6, 0xa1, 0x4d, 0x5e, 0xfd, 0x9f, 0x44, 0x1e, 0x8d, 0x11, 0x59, 0x9c, 0x4e, 0xe4, 0x04, 0x48,
+	0xf3, 0x66, 0xf1, 0x33, 0x48, 0x5f, 0xf5, 0x12, 0x95, 0xc2, 0x32, 0xac, 0x8a, 0x05, 0xfa, 0x16,
+	0xdc, 0xe5, 0x8b, 0xb0, 0x16, 0x18, 0x89, 0x8a, 0x63, 0x07, 0xd6, 0x4d, 0xe2, 0x78, 0xbd, 0x79,
+	0x14, 0x05, 0xbf, 0x17, 0xeb, 0x65, 0xcf, 0x71, 0x6c, 0x36, 0xbb, 0x35, 0x0c, 0x8a, 0x6b, 0x39,
+	0x01, 0xe5, 0x42, 0x0e, 0x3c, 0xc4, 0x87, 0x99, 0xf9, 0x72, 0xec, 0x56, 0x94, 0xa7, 0xdf, 0x8a,
+	0x89, 0x80, 0xe6, 0x7d, 0x2f, 0xaa, 0x70, 0xf3, 0x29, 0xb3, 0xd8, 0x3c, 0x48, 0xfc, 0x27, 0x06,
+	0x4a, 0xd5, 0x3d, 0xf1, 0x06, 0x8c, 0xa0, 0x11, 0x46, 0x86, 0xd5, 0x12, 0xbb, 0x52, 0x2d, 0x0f,
+	0x40, 0x69, 0xda, 0x6e, 0x5d, 0x50, 0xb5, 0x56, 0xf8, 0x60, 0x3a, 0x2b, 0x3b, 0xb6, 0x5b, 0x37,
+	0x85, 0x0e, 0x2e, 0x03, 0xd4, 0x3a, 0xc4, 0x62, 0xa4, 0xfe, 0xd2, 0x62, 0x59, 0x45, 0x43, 0x9b,
+	0xc9, 0x82, 0xaa, 0xfb, 0x7d, 0x58, 0x0f, 0xfa, 0xb0, 0xbe, 0x1f, 0xf4, 0xe1, 0xd2, 0x8d, 0x8b,
+	0xdf, 0x36, 0x16, 0xbe, 0xff, 0x7d, 0x03, 0x99, 0xcb, 0x52, 0xaf, 0xc8, 0xb8, 0x91, 0x6e, 0xbb,
+	0x1e, 0x18, 0x59, 0x9c, 0xc5, 0x88, 0xd4, 0x2b, 0x32, 0xfc, 0x68, 0x90, 0xdd, 0x84, 0xc8, 0x6e,
+	0x61, 0x7a, 0x1c, 0x9c, 0xa9, 0x79, 0x27, 0xf3, 0x0b, 0x48, 0x5f, 0x4d, 0xa6, 0x2c, 0xae, 0x4f,
+	0x40, 0xb1, 0xdd, 0x13, 0x4f, 0x18, 0x49, 0x86, 0x21, 0x99, 0x83, 0x2b, 0x29, 0x3c, 0x52, 0x53,
+	0x68, 0xe6, 0x7f, 0x42, 0xb0, 0x7e, 0x20, 0xc2, 0x9d, 0xfd, 0xa6, 0x04, 0xde, 0x63, 0x51, 0xbd,
+	0xe3, 0x6d, 0x48, 0xfa, 0x5c, 0x8b, 0x81, 0x2b, 0xee, 0xca, 0xa4, 0x24, 0x7d, 0xca, 0x67, 0xf2,
+	0xae, 0x45, 0x9b, 0xa6, 0x4c, 0x29, 0x97, 0xf3, 0xcf, 0x21, 0x33, 0x8e, 0x7c, 0x6e, 0xb4, 0x6c,
+	0x41, 0xfa, 0xb1, 0x4d, 0x07, 0x84, 0x87, 0xef, 0x89, 0xf9, 0x23, 0x58, 0x1f, 0xd3, 0xbc, 0x06,
+	0x2a, 0x1e, 0x11, 0x54, 0x09, 0x56, 0x0e, 0xa8, 0xd5, 0x20, 0x6f, 0x53, 0xcb, 0xdb, 0xb0, 0x2a,
+	0x6d, 0x48, 0x58, 0x18, 0x14, 0x6a, 0x7f, 0xed, 0xd7, 0x74, 0xdc, 0x14, 0x32, 0xaf, 0x69, 0xdb,
+	0xf5, 0xea, 0x84, 0x0a, 0xcd, 0xb8, 0x29, 0xbf, 0x3e, 0x7c, 0x8d, 0x40, 0xe1, 0x65, 0x8a, 0xdf,
+	0x87, 0xa5, 0x83, 0xbd, 0x9d, 0xbd, 0x27, 0xcf, 0xf6, 0x52, 0x0b, 0xea, 0x3b, 0x67, 0xe7, 0x5a,
+	0x92, 0x2f, 0x1f, 0xb8, 0x4d, 0xd7, 0x7b, 0xe5, 0xe2, 0x0c, 0x28, 0x87, 0xd5, 0xca, 0xb3, 0x14,
+	0x52, 0x57, 0xce, 0xce, 0xb5, 0x1b, 0x7c, 0x8b, 0x8f, 0x28, 0xac, 0x42, 0xa2, 0x58, 0xde, 0xaf,
+	0x1e, 0x56, 0x52, 0x31, 0x75, 0xed, 0xec, 0x5c, 0x03, 0xbe, 0x53, 0xac, 0x31, 0xbb, 0x47, 0xb0,
+	0x06, 0xcb, 0xe5, 0x27, 0xbb, 0xbb, 0xd5, 0xfd, 0xfd, 0xca, 0xc3, 0x54, 0x5c, 0x7d, 0xf7, 0xec,
+	0x5c, 0x5b, 0xe5, 0xdb, 0x7e, 0xaf, 0x64, 0xa4, 0xae, 0xae, 0xbc, 0xfe, 0x21, 0xb7, 0xf0, 0xf3,
+	0x8f, 0x39, 0x81, 0xa0, 0xf0, 0xf7, 0x12, 0x2c, 0x0f, 0x38, 0xc6, 0xdf, 0xc2, 0x92, 0x7c, 0x4a,
+	0xe0, 0xad, 0xa8, 0xcf, 0x1b, 0xf5, 0x7e, 0x04, 0x4d, 0x49, 0x62, 0x17, 0x14, 0x11, 0xe1, 0xc7,
+	0x91, 0x9e, 0x04, 0xea, 0xdd, 0x59, 0xd5, 0xa4, 0xdb, 0x26, 0x24, 0xfc, 0x69, 0x8b, 0x8d, 0xe9,
+	0x16, 0xae, 0x0c, 0x77, 0xf5, 0x76, 0x78, 0x05, 0xe9, 0xec, 0x08, 0x12, 0x7e, 0x32, 0xf0, 0xbd,
+	0x88, 0x23, 0x4e, 0xcd, 0x5c, 0xab, 0xec, 0x0a, 0x7f, 0x8a, 0x73, 0xd3, 0xfe, 0xc8, 0x0f, 0x63,
+	0x7a, 0xe2, 0xe3, 0xe0, 0x3f, 0x4d, 0x77, 0x41, 0xe1, 0x9d, 0x33, 0x4c, 0x66, 0x26, 0x8c, 0xcb,
+	0x30, 0x99, 0x99, 0xd8, 0x98, 0xbf, 0x81, 0x84, 0xdf, 0x9b, 0xc2, 0x44, 0x34, 0xb1, 0xff, 0xaa,
+	0x5b, 0xb3, 0x2b, 0x4a, 0xe7, 0x7d, 0x50, 0x78, 0x0b, 0xc2, 0x21, 0xc0, 0x4f, 0x6a, 0x72, 0xea,
+	0xbd, 0x99, 0xf5, 0x7c, 0xc7, 0xb7, 0x11, 0x3e, 0x85, 0x45, 0xd1, 0x5e, 0xb0, 0x1e, 0x02, 0xfd,
+	0x48, 0x2f, 0x53, 0x8d, 0xd0, 0xe7, 0x7d, 0x5f, 0xa5, 0x17, 0x17, 0x6f, 0x72, 0x0b, 0xbf, 0xbe,
+	0xc9, 0x2d, 0x7c, 0x77, 0x99, 0x43, 0x17, 0x97, 0x39, 0xf4, 0xcb, 0x65, 0x0e, 0xfd, 0x71, 0x99,
+	0x43, 0xcf, 0x4b, 0x91, 0x7f, 0x39, 0xb7, 0x03, 0xf9, 0x38, 0x21, 0xae, 0xd1, 0x47, 0xff, 0x06,
+	0x00, 0x00, 0xff, 0xff, 0x55, 0xaa, 0x35, 0xc8, 0xbf, 0x0e, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto b/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto
new file mode 100644
index 0000000..f8c0895
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto
@@ -0,0 +1,150 @@
+syntax = "proto3";
+
+package containerd.services.snapshots.v1;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/empty.proto";
+import "google/protobuf/field_mask.proto";
+import "google/protobuf/timestamp.proto";
+import "github.com/containerd/containerd/api/types/mount.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/snapshot/v1;snapshot";
+
+// Snapshot service manages snapshots
+service Snapshots {
+	rpc Prepare(PrepareSnapshotRequest) returns (PrepareSnapshotResponse);
+	rpc View(ViewSnapshotRequest) returns (ViewSnapshotResponse);
+	rpc Mounts(MountsRequest) returns (MountsResponse);
+	rpc Commit(CommitSnapshotRequest) returns (google.protobuf.Empty);
+	rpc Remove(RemoveSnapshotRequest) returns (google.protobuf.Empty);
+	rpc Stat(StatSnapshotRequest) returns (StatSnapshotResponse);
+	rpc Update(UpdateSnapshotRequest) returns (UpdateSnapshotResponse);
+	rpc List(ListSnapshotsRequest) returns (stream ListSnapshotsResponse);
+	rpc Usage(UsageRequest) returns (UsageResponse);
+}
+
+message PrepareSnapshotRequest {
+	string snapshotter = 1;
+	string key = 2;
+	string parent = 3;
+
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	map<string, string> labels  = 4;
+}
+
+message PrepareSnapshotResponse {
+	repeated containerd.types.Mount mounts = 1;
+}
+
+message ViewSnapshotRequest {
+	string snapshotter = 1;
+	string key = 2;
+	string parent = 3;
+
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	map<string, string> labels  = 4;
+}
+
+message ViewSnapshotResponse {
+	repeated containerd.types.Mount mounts = 1;
+}
+
+message MountsRequest {
+	string snapshotter = 1;
+	string key = 2;
+}
+
+message MountsResponse {
+	repeated containerd.types.Mount mounts = 1;
+}
+
+message RemoveSnapshotRequest {
+	string snapshotter = 1;
+	string key = 2;
+}
+
+message CommitSnapshotRequest {
+	string snapshotter = 1;
+	string name = 2;
+	string key = 3;
+
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	map<string, string> labels  = 4;
+}
+
+message StatSnapshotRequest {
+	string snapshotter = 1;
+	string key = 2;
+}
+
+enum Kind {
+	option (gogoproto.goproto_enum_prefix) = false;
+	option (gogoproto.enum_customname) = "Kind";
+
+	UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "KindUnknown"];
+	VIEW = 1 [(gogoproto.enumvalue_customname) = "KindView"];
+	ACTIVE = 2 [(gogoproto.enumvalue_customname) = "KindActive"];
+	COMMITTED = 3 [(gogoproto.enumvalue_customname) = "KindCommitted"];
+}
+
+message Info {
+	string name = 1;
+	string parent = 2;
+	Kind kind = 3;
+
+	// CreatedAt provides the time at which the snapshot was created.
+	google.protobuf.Timestamp created_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// UpdatedAt provides the time the info was last updated.
+	google.protobuf.Timestamp updated_at = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+
+	// Labels are arbitrary data on snapshots.
+	//
+	// The combined size of a key/value pair cannot exceed 4096 bytes.
+	map<string, string> labels  = 6;
+}
+
+message StatSnapshotResponse {
+	Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message UpdateSnapshotRequest {
+	string snapshotter = 1;
+	Info info = 2 [(gogoproto.nullable) = false];
+
+	// UpdateMask specifies which fields to perform the update on. If empty,
+	// the operation applies to all fields.
+	//
+	// In info, Name, Parent, Kind, Created are immutable,
+	// other field may be updated using this mask.
+	// If no mask is provided, all mutable field are updated.
+	google.protobuf.FieldMask update_mask = 3;
+}
+
+message UpdateSnapshotResponse {
+	Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message ListSnapshotsRequest{
+	string snapshotter = 1;
+}
+
+message ListSnapshotsResponse {
+	repeated Info info = 1 [(gogoproto.nullable) = false];
+}
+
+message UsageRequest {
+	string snapshotter = 1;
+	string key = 2;
+}
+
+message UsageResponse {
+	int64 size = 1;
+	int64 inodes = 2;
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
new file mode 100644
index 0000000..0f58768
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.pb.go
@@ -0,0 +1,5812 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
+// DO NOT EDIT!
+
+/*
+	Package tasks is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
+
+	It has these top-level messages:
+		CreateTaskRequest
+		CreateTaskResponse
+		StartRequest
+		StartResponse
+		DeleteTaskRequest
+		DeleteResponse
+		DeleteProcessRequest
+		GetRequest
+		GetResponse
+		ListTasksRequest
+		ListTasksResponse
+		KillRequest
+		ExecProcessRequest
+		ExecProcessResponse
+		ResizePtyRequest
+		CloseIORequest
+		PauseTaskRequest
+		ResumeTaskRequest
+		ListPidsRequest
+		ListPidsResponse
+		CheckpointTaskRequest
+		CheckpointTaskResponse
+		UpdateTaskRequest
+		MetricsRequest
+		MetricsResponse
+		WaitRequest
+		WaitResponse
+*/
+package tasks
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/ptypes/empty"
+import google_protobuf1 "github.com/gogo/protobuf/types"
+import _ "github.com/gogo/protobuf/gogoproto"
+import containerd_types "github.com/containerd/containerd/api/types"
+import containerd_types1 "github.com/containerd/containerd/api/types"
+import containerd_types2 "github.com/containerd/containerd/api/types"
+import containerd_v1_types "github.com/containerd/containerd/api/types/task"
+import _ "github.com/gogo/protobuf/types"
+
+import time "time"
+import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type CreateTaskRequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	// RootFS provides the pre-chroot mounts to perform in the shim before
+	// executing the container task.
+	//
+	// These are for mounts that cannot be performed in the user namespace.
+	// Typically, these mounts should be resolved from snapshots specified on
+	// the container object.
+	Rootfs     []*containerd_types.Mount     `protobuf:"bytes,3,rep,name=rootfs" json:"rootfs,omitempty"`
+	Stdin      string                        `protobuf:"bytes,4,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout     string                        `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr     string                        `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Terminal   bool                          `protobuf:"varint,7,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	Checkpoint *containerd_types2.Descriptor `protobuf:"bytes,8,opt,name=checkpoint" json:"checkpoint,omitempty"`
+	Options    *google_protobuf1.Any         `protobuf:"bytes,9,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *CreateTaskRequest) Reset()                    { *m = CreateTaskRequest{} }
+func (*CreateTaskRequest) ProtoMessage()               {}
+func (*CreateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{0} }
+
+type CreateTaskResponse struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	Pid         uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+}
+
+func (m *CreateTaskResponse) Reset()                    { *m = CreateTaskResponse{} }
+func (*CreateTaskResponse) ProtoMessage()               {}
+func (*CreateTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{1} }
+
+type StartRequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+}
+
+func (m *StartRequest) Reset()                    { *m = StartRequest{} }
+func (*StartRequest) ProtoMessage()               {}
+func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{2} }
+
+type StartResponse struct {
+	Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
+}
+
+func (m *StartResponse) Reset()                    { *m = StartResponse{} }
+func (*StartResponse) ProtoMessage()               {}
+func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{3} }
+
+type DeleteTaskRequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+}
+
+func (m *DeleteTaskRequest) Reset()                    { *m = DeleteTaskRequest{} }
+func (*DeleteTaskRequest) ProtoMessage()               {}
+func (*DeleteTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{4} }
+
+type DeleteResponse struct {
+	ID         string    `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Pid        uint32    `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+	ExitStatus uint32    `protobuf:"varint,3,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt   time.Time `protobuf:"bytes,4,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+}
+
+func (m *DeleteResponse) Reset()                    { *m = DeleteResponse{} }
+func (*DeleteResponse) ProtoMessage()               {}
+func (*DeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{5} }
+
+type DeleteProcessRequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+}
+
+func (m *DeleteProcessRequest) Reset()                    { *m = DeleteProcessRequest{} }
+func (*DeleteProcessRequest) ProtoMessage()               {}
+func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{6} }
+
+type GetRequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+}
+
+func (m *GetRequest) Reset()                    { *m = GetRequest{} }
+func (*GetRequest) ProtoMessage()               {}
+func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{7} }
+
+type GetResponse struct {
+	Process *containerd_v1_types.Process `protobuf:"bytes,1,opt,name=process" json:"process,omitempty"`
+}
+
+func (m *GetResponse) Reset()                    { *m = GetResponse{} }
+func (*GetResponse) ProtoMessage()               {}
+func (*GetResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{8} }
+
+type ListTasksRequest struct {
+	Filter string `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"`
+}
+
+func (m *ListTasksRequest) Reset()                    { *m = ListTasksRequest{} }
+func (*ListTasksRequest) ProtoMessage()               {}
+func (*ListTasksRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{9} }
+
+type ListTasksResponse struct {
+	Tasks []*containerd_v1_types.Process `protobuf:"bytes,1,rep,name=tasks" json:"tasks,omitempty"`
+}
+
+func (m *ListTasksResponse) Reset()                    { *m = ListTasksResponse{} }
+func (*ListTasksResponse) ProtoMessage()               {}
+func (*ListTasksResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{10} }
+
+type KillRequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	Signal      uint32 `protobuf:"varint,3,opt,name=signal,proto3" json:"signal,omitempty"`
+	All         bool   `protobuf:"varint,4,opt,name=all,proto3" json:"all,omitempty"`
+}
+
+func (m *KillRequest) Reset()                    { *m = KillRequest{} }
+func (*KillRequest) ProtoMessage()               {}
+func (*KillRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{11} }
+
+type ExecProcessRequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	Stdin       string `protobuf:"bytes,2,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout      string `protobuf:"bytes,3,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr      string `protobuf:"bytes,4,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Terminal    bool   `protobuf:"varint,5,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	// Spec for starting a process in the target container.
+	//
+	// For runc, this is a process spec, for example.
+	Spec *google_protobuf1.Any `protobuf:"bytes,6,opt,name=spec" json:"spec,omitempty"`
+	// id of the exec process
+	ExecID string `protobuf:"bytes,7,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+}
+
+func (m *ExecProcessRequest) Reset()                    { *m = ExecProcessRequest{} }
+func (*ExecProcessRequest) ProtoMessage()               {}
+func (*ExecProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{12} }
+
+type ExecProcessResponse struct {
+}
+
+func (m *ExecProcessResponse) Reset()                    { *m = ExecProcessResponse{} }
+func (*ExecProcessResponse) ProtoMessage()               {}
+func (*ExecProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{13} }
+
+type ResizePtyRequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	Width       uint32 `protobuf:"varint,3,opt,name=width,proto3" json:"width,omitempty"`
+	Height      uint32 `protobuf:"varint,4,opt,name=height,proto3" json:"height,omitempty"`
+}
+
+func (m *ResizePtyRequest) Reset()                    { *m = ResizePtyRequest{} }
+func (*ResizePtyRequest) ProtoMessage()               {}
+func (*ResizePtyRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{14} }
+
+type CloseIORequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+	Stdin       bool   `protobuf:"varint,3,opt,name=stdin,proto3" json:"stdin,omitempty"`
+}
+
+func (m *CloseIORequest) Reset()                    { *m = CloseIORequest{} }
+func (*CloseIORequest) ProtoMessage()               {}
+func (*CloseIORequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{15} }
+
+type PauseTaskRequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+}
+
+func (m *PauseTaskRequest) Reset()                    { *m = PauseTaskRequest{} }
+func (*PauseTaskRequest) ProtoMessage()               {}
+func (*PauseTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{16} }
+
+type ResumeTaskRequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+}
+
+func (m *ResumeTaskRequest) Reset()                    { *m = ResumeTaskRequest{} }
+func (*ResumeTaskRequest) ProtoMessage()               {}
+func (*ResumeTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{17} }
+
+type ListPidsRequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+}
+
+func (m *ListPidsRequest) Reset()                    { *m = ListPidsRequest{} }
+func (*ListPidsRequest) ProtoMessage()               {}
+func (*ListPidsRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{18} }
+
+type ListPidsResponse struct {
+	// Processes includes the process ID and additional process information
+	Processes []*containerd_v1_types.ProcessInfo `protobuf:"bytes,1,rep,name=processes" json:"processes,omitempty"`
+}
+
+func (m *ListPidsResponse) Reset()                    { *m = ListPidsResponse{} }
+func (*ListPidsResponse) ProtoMessage()               {}
+func (*ListPidsResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{19} }
+
+type CheckpointTaskRequest struct {
+	ContainerID      string                                     `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ParentCheckpoint github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=parent_checkpoint,json=parentCheckpoint,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"parent_checkpoint"`
+	Options          *google_protobuf1.Any                      `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *CheckpointTaskRequest) Reset()                    { *m = CheckpointTaskRequest{} }
+func (*CheckpointTaskRequest) ProtoMessage()               {}
+func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{20} }
+
+type CheckpointTaskResponse struct {
+	Descriptors []*containerd_types2.Descriptor `protobuf:"bytes,1,rep,name=descriptors" json:"descriptors,omitempty"`
+}
+
+func (m *CheckpointTaskResponse) Reset()                    { *m = CheckpointTaskResponse{} }
+func (*CheckpointTaskResponse) ProtoMessage()               {}
+func (*CheckpointTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{21} }
+
+type UpdateTaskRequest struct {
+	ContainerID string                `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	Resources   *google_protobuf1.Any `protobuf:"bytes,2,opt,name=resources" json:"resources,omitempty"`
+}
+
+func (m *UpdateTaskRequest) Reset()                    { *m = UpdateTaskRequest{} }
+func (*UpdateTaskRequest) ProtoMessage()               {}
+func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{22} }
+
+type MetricsRequest struct {
+	Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
+}
+
+func (m *MetricsRequest) Reset()                    { *m = MetricsRequest{} }
+func (*MetricsRequest) ProtoMessage()               {}
+func (*MetricsRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{23} }
+
+type MetricsResponse struct {
+	Metrics []*containerd_types1.Metric `protobuf:"bytes,1,rep,name=metrics" json:"metrics,omitempty"`
+}
+
+func (m *MetricsResponse) Reset()                    { *m = MetricsResponse{} }
+func (*MetricsResponse) ProtoMessage()               {}
+func (*MetricsResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{24} }
+
+type WaitRequest struct {
+	ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ExecID      string `protobuf:"bytes,2,opt,name=exec_id,json=execId,proto3" json:"exec_id,omitempty"`
+}
+
+func (m *WaitRequest) Reset()                    { *m = WaitRequest{} }
+func (*WaitRequest) ProtoMessage()               {}
+func (*WaitRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{25} }
+
+type WaitResponse struct {
+	ExitStatus uint32    `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt   time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+}
+
+func (m *WaitResponse) Reset()                    { *m = WaitResponse{} }
+func (*WaitResponse) ProtoMessage()               {}
+func (*WaitResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{26} }
+
+func init() {
+	proto.RegisterType((*CreateTaskRequest)(nil), "containerd.services.tasks.v1.CreateTaskRequest")
+	proto.RegisterType((*CreateTaskResponse)(nil), "containerd.services.tasks.v1.CreateTaskResponse")
+	proto.RegisterType((*StartRequest)(nil), "containerd.services.tasks.v1.StartRequest")
+	proto.RegisterType((*StartResponse)(nil), "containerd.services.tasks.v1.StartResponse")
+	proto.RegisterType((*DeleteTaskRequest)(nil), "containerd.services.tasks.v1.DeleteTaskRequest")
+	proto.RegisterType((*DeleteResponse)(nil), "containerd.services.tasks.v1.DeleteResponse")
+	proto.RegisterType((*DeleteProcessRequest)(nil), "containerd.services.tasks.v1.DeleteProcessRequest")
+	proto.RegisterType((*GetRequest)(nil), "containerd.services.tasks.v1.GetRequest")
+	proto.RegisterType((*GetResponse)(nil), "containerd.services.tasks.v1.GetResponse")
+	proto.RegisterType((*ListTasksRequest)(nil), "containerd.services.tasks.v1.ListTasksRequest")
+	proto.RegisterType((*ListTasksResponse)(nil), "containerd.services.tasks.v1.ListTasksResponse")
+	proto.RegisterType((*KillRequest)(nil), "containerd.services.tasks.v1.KillRequest")
+	proto.RegisterType((*ExecProcessRequest)(nil), "containerd.services.tasks.v1.ExecProcessRequest")
+	proto.RegisterType((*ExecProcessResponse)(nil), "containerd.services.tasks.v1.ExecProcessResponse")
+	proto.RegisterType((*ResizePtyRequest)(nil), "containerd.services.tasks.v1.ResizePtyRequest")
+	proto.RegisterType((*CloseIORequest)(nil), "containerd.services.tasks.v1.CloseIORequest")
+	proto.RegisterType((*PauseTaskRequest)(nil), "containerd.services.tasks.v1.PauseTaskRequest")
+	proto.RegisterType((*ResumeTaskRequest)(nil), "containerd.services.tasks.v1.ResumeTaskRequest")
+	proto.RegisterType((*ListPidsRequest)(nil), "containerd.services.tasks.v1.ListPidsRequest")
+	proto.RegisterType((*ListPidsResponse)(nil), "containerd.services.tasks.v1.ListPidsResponse")
+	proto.RegisterType((*CheckpointTaskRequest)(nil), "containerd.services.tasks.v1.CheckpointTaskRequest")
+	proto.RegisterType((*CheckpointTaskResponse)(nil), "containerd.services.tasks.v1.CheckpointTaskResponse")
+	proto.RegisterType((*UpdateTaskRequest)(nil), "containerd.services.tasks.v1.UpdateTaskRequest")
+	proto.RegisterType((*MetricsRequest)(nil), "containerd.services.tasks.v1.MetricsRequest")
+	proto.RegisterType((*MetricsResponse)(nil), "containerd.services.tasks.v1.MetricsResponse")
+	proto.RegisterType((*WaitRequest)(nil), "containerd.services.tasks.v1.WaitRequest")
+	proto.RegisterType((*WaitResponse)(nil), "containerd.services.tasks.v1.WaitResponse")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Tasks service
+
+type TasksClient interface {
+	// Create a task.
+	Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error)
+	// Start a process.
+	Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error)
+	// Delete a task and on disk state.
+	Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error)
+	DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error)
+	Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error)
+	List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error)
+	// Kill a task or process.
+	Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
+	Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
+	ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
+	CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
+	Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
+	Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
+	ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error)
+	Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error)
+	Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
+	Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error)
+	Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error)
+}
+
+type tasksClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewTasksClient(cc *grpc.ClientConn) TasksClient {
+	return &tasksClient{cc}
+}
+
+func (c *tasksClient) Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) {
+	out := new(CreateTaskResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Create", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) {
+	out := new(StartResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Start", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) Delete(ctx context.Context, in *DeleteTaskRequest, opts ...grpc.CallOption) (*DeleteResponse, error) {
+	out := new(DeleteResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Delete", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) {
+	out := new(DeleteResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/DeleteProcess", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) {
+	out := new(GetResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Get", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) List(ctx context.Context, in *ListTasksRequest, opts ...grpc.CallOption) (*ListTasksResponse, error) {
+	out := new(ListTasksResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/List", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	out := new(google_protobuf.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Kill", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	out := new(google_protobuf.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Exec", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	out := new(google_protobuf.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ResizePty", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	out := new(google_protobuf.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/CloseIO", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) Pause(ctx context.Context, in *PauseTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	out := new(google_protobuf.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Pause", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) Resume(ctx context.Context, in *ResumeTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	out := new(google_protobuf.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Resume", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) {
+	out := new(ListPidsResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/ListPids", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error) {
+	out := new(CheckpointTaskResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Checkpoint", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	out := new(google_protobuf.Empty)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Update", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) {
+	out := new(MetricsResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Metrics", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *tasksClient) Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) {
+	out := new(WaitResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Wait", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for Tasks service
+
+type TasksServer interface {
+	// Create a task.
+	Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error)
+	// Start a process.
+	Start(context.Context, *StartRequest) (*StartResponse, error)
+	// Delete a task and on disk state.
+	Delete(context.Context, *DeleteTaskRequest) (*DeleteResponse, error)
+	DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error)
+	Get(context.Context, *GetRequest) (*GetResponse, error)
+	List(context.Context, *ListTasksRequest) (*ListTasksResponse, error)
+	// Kill a task or process.
+	Kill(context.Context, *KillRequest) (*google_protobuf.Empty, error)
+	Exec(context.Context, *ExecProcessRequest) (*google_protobuf.Empty, error)
+	ResizePty(context.Context, *ResizePtyRequest) (*google_protobuf.Empty, error)
+	CloseIO(context.Context, *CloseIORequest) (*google_protobuf.Empty, error)
+	Pause(context.Context, *PauseTaskRequest) (*google_protobuf.Empty, error)
+	Resume(context.Context, *ResumeTaskRequest) (*google_protobuf.Empty, error)
+	ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error)
+	Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error)
+	Update(context.Context, *UpdateTaskRequest) (*google_protobuf.Empty, error)
+	Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error)
+	Wait(context.Context, *WaitRequest) (*WaitResponse, error)
+}
+
+func RegisterTasksServer(s *grpc.Server, srv TasksServer) {
+	s.RegisterService(&_Tasks_serviceDesc, srv)
+}
+
+func _Tasks_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateTaskRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).Create(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/Create",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).Create(ctx, req.(*CreateTaskRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(StartRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).Start(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/Start",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).Start(ctx, req.(*StartRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteTaskRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).Delete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/Delete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).Delete(ctx, req.(*DeleteTaskRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_DeleteProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteProcessRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).DeleteProcess(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/DeleteProcess",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).DeleteProcess(ctx, req.(*DeleteProcessRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).Get(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/Get",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).Get(ctx, req.(*GetRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListTasksRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).List(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/List",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).List(ctx, req.(*ListTasksRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_Kill_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(KillRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).Kill(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/Kill",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).Kill(ctx, req.(*KillRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_Exec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ExecProcessRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).Exec(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/Exec",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).Exec(ctx, req.(*ExecProcessRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_ResizePty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ResizePtyRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).ResizePty(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/ResizePty",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).ResizePty(ctx, req.(*ResizePtyRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_CloseIO_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CloseIORequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).CloseIO(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/CloseIO",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).CloseIO(ctx, req.(*CloseIORequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_Pause_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PauseTaskRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).Pause(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/Pause",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).Pause(ctx, req.(*PauseTaskRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ResumeTaskRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).Resume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/Resume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).Resume(ctx, req.(*ResumeTaskRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_ListPids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListPidsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).ListPids(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/ListPids",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).ListPids(ctx, req.(*ListPidsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_Checkpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CheckpointTaskRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).Checkpoint(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/Checkpoint",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).Checkpoint(ctx, req.(*CheckpointTaskRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateTaskRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).Update(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/Update",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).Update(ctx, req.(*UpdateTaskRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_Metrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MetricsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).Metrics(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/Metrics",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).Metrics(ctx, req.(*MetricsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Tasks_Wait_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(WaitRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(TasksServer).Wait(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.tasks.v1.Tasks/Wait",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(TasksServer).Wait(ctx, req.(*WaitRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Tasks_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "containerd.services.tasks.v1.Tasks",
+	HandlerType: (*TasksServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Create",
+			Handler:    _Tasks_Create_Handler,
+		},
+		{
+			MethodName: "Start",
+			Handler:    _Tasks_Start_Handler,
+		},
+		{
+			MethodName: "Delete",
+			Handler:    _Tasks_Delete_Handler,
+		},
+		{
+			MethodName: "DeleteProcess",
+			Handler:    _Tasks_DeleteProcess_Handler,
+		},
+		{
+			MethodName: "Get",
+			Handler:    _Tasks_Get_Handler,
+		},
+		{
+			MethodName: "List",
+			Handler:    _Tasks_List_Handler,
+		},
+		{
+			MethodName: "Kill",
+			Handler:    _Tasks_Kill_Handler,
+		},
+		{
+			MethodName: "Exec",
+			Handler:    _Tasks_Exec_Handler,
+		},
+		{
+			MethodName: "ResizePty",
+			Handler:    _Tasks_ResizePty_Handler,
+		},
+		{
+			MethodName: "CloseIO",
+			Handler:    _Tasks_CloseIO_Handler,
+		},
+		{
+			MethodName: "Pause",
+			Handler:    _Tasks_Pause_Handler,
+		},
+		{
+			MethodName: "Resume",
+			Handler:    _Tasks_Resume_Handler,
+		},
+		{
+			MethodName: "ListPids",
+			Handler:    _Tasks_ListPids_Handler,
+		},
+		{
+			MethodName: "Checkpoint",
+			Handler:    _Tasks_Checkpoint_Handler,
+		},
+		{
+			MethodName: "Update",
+			Handler:    _Tasks_Update_Handler,
+		},
+		{
+			MethodName: "Metrics",
+			Handler:    _Tasks_Metrics_Handler,
+		},
+		{
+			MethodName: "Wait",
+			Handler:    _Tasks_Wait_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/containerd/containerd/api/services/tasks/v1/tasks.proto",
+}
+
+func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.Rootfs) > 0 {
+		for _, msg := range m.Rootfs {
+			dAtA[i] = 0x1a
+			i++
+			i = encodeVarintTasks(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if len(m.Stdin) > 0 {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdin)))
+		i += copy(dAtA[i:], m.Stdin)
+	}
+	if len(m.Stdout) > 0 {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdout)))
+		i += copy(dAtA[i:], m.Stdout)
+	}
+	if len(m.Stderr) > 0 {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.Stderr)))
+		i += copy(dAtA[i:], m.Stderr)
+	}
+	if m.Terminal {
+		dAtA[i] = 0x38
+		i++
+		if m.Terminal {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.Checkpoint != nil {
+		dAtA[i] = 0x42
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.Checkpoint.Size()))
+		n1, err := m.Checkpoint.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n1
+	}
+	if m.Options != nil {
+		dAtA[i] = 0x4a
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.Options.Size()))
+		n2, err := m.Options.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n2
+	}
+	return i, nil
+}
+
+func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if m.Pid != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.Pid))
+	}
+	return i, nil
+}
+
+func (m *StartRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
+	return i, nil
+}
+
+func (m *StartResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Pid != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.Pid))
+	}
+	return i, nil
+}
+
+func (m *DeleteTaskRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteTaskRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	return i, nil
+}
+
+func (m *DeleteResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if m.Pid != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.Pid))
+	}
+	if m.ExitStatus != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.ExitStatus))
+	}
+	dAtA[i] = 0x22
+	i++
+	i = encodeVarintTasks(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n3, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n3
+	return i, nil
+}
+
+func (m *DeleteProcessRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteProcessRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
+	return i, nil
+}
+
+func (m *GetRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GetRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
+	return i, nil
+}
+
+func (m *GetResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *GetResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Process != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.Process.Size()))
+		n4, err := m.Process.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n4
+	}
+	return i, nil
+}
+
+func (m *ListTasksRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListTasksRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Filter) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.Filter)))
+		i += copy(dAtA[i:], m.Filter)
+	}
+	return i, nil
+}
+
+func (m *ListTasksResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListTasksResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Tasks) > 0 {
+		for _, msg := range m.Tasks {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintTasks(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *KillRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
+	if m.Signal != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.Signal))
+	}
+	if m.All {
+		dAtA[i] = 0x20
+		i++
+		if m.All {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	return i, nil
+}
+
+func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.Stdin) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdin)))
+		i += copy(dAtA[i:], m.Stdin)
+	}
+	if len(m.Stdout) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.Stdout)))
+		i += copy(dAtA[i:], m.Stdout)
+	}
+	if len(m.Stderr) > 0 {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.Stderr)))
+		i += copy(dAtA[i:], m.Stderr)
+	}
+	if m.Terminal {
+		dAtA[i] = 0x28
+		i++
+		if m.Terminal {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.Spec != nil {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.Spec.Size()))
+		n5, err := m.Spec.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n5
+	}
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
+	return i, nil
+}
+
+func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	return i, nil
+}
+
+func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
+	if m.Width != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.Width))
+	}
+	if m.Height != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.Height))
+	}
+	return i, nil
+}
+
+func (m *CloseIORequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
+	if m.Stdin {
+		dAtA[i] = 0x18
+		i++
+		if m.Stdin {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	return i, nil
+}
+
+func (m *PauseTaskRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PauseTaskRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	return i, nil
+}
+
+func (m *ResumeTaskRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResumeTaskRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	return i, nil
+}
+
+func (m *ListPidsRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListPidsRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	return i, nil
+}
+
+func (m *ListPidsResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListPidsResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Processes) > 0 {
+		for _, msg := range m.Processes {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintTasks(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.ParentCheckpoint) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ParentCheckpoint)))
+		i += copy(dAtA[i:], m.ParentCheckpoint)
+	}
+	if m.Options != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.Options.Size()))
+		n6, err := m.Options.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n6
+	}
+	return i, nil
+}
+
+func (m *CheckpointTaskResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CheckpointTaskResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Descriptors) > 0 {
+		for _, msg := range m.Descriptors {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintTasks(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if m.Resources != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.Resources.Size()))
+		n7, err := m.Resources.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n7
+	}
+	return i, nil
+}
+
+func (m *MetricsRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MetricsRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			dAtA[i] = 0xa
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	return i, nil
+}
+
+func (m *MetricsResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MetricsResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Metrics) > 0 {
+		for _, msg := range m.Metrics {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintTasks(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *WaitRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WaitRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.ExecID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(len(m.ExecID)))
+		i += copy(dAtA[i:], m.ExecID)
+	}
+	return i, nil
+}
+
+func (m *WaitResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.ExitStatus != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintTasks(dAtA, i, uint64(m.ExitStatus))
+	}
+	dAtA[i] = 0x12
+	i++
+	i = encodeVarintTasks(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n8, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n8
+	return i, nil
+}
+
+func encodeFixed64Tasks(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Tasks(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintTasks(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *CreateTaskRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	if len(m.Rootfs) > 0 {
+		for _, e := range m.Rootfs {
+			l = e.Size()
+			n += 1 + l + sovTasks(uint64(l))
+		}
+	}
+	l = len(m.Stdin)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.Stdout)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.Stderr)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	if m.Terminal {
+		n += 2
+	}
+	if m.Checkpoint != nil {
+		l = m.Checkpoint.Size()
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	if m.Options != nil {
+		l = m.Options.Size()
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *CreateTaskResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	if m.Pid != 0 {
+		n += 1 + sovTasks(uint64(m.Pid))
+	}
+	return n
+}
+
+func (m *StartRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *StartResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.Pid != 0 {
+		n += 1 + sovTasks(uint64(m.Pid))
+	}
+	return n
+}
+
+func (m *DeleteTaskRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *DeleteResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	if m.Pid != 0 {
+		n += 1 + sovTasks(uint64(m.Pid))
+	}
+	if m.ExitStatus != 0 {
+		n += 1 + sovTasks(uint64(m.ExitStatus))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
+	n += 1 + l + sovTasks(uint64(l))
+	return n
+}
+
+func (m *DeleteProcessRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *GetRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *GetResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.Process != nil {
+		l = m.Process.Size()
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *ListTasksRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Filter)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *ListTasksResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Tasks) > 0 {
+		for _, e := range m.Tasks {
+			l = e.Size()
+			n += 1 + l + sovTasks(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *KillRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	if m.Signal != 0 {
+		n += 1 + sovTasks(uint64(m.Signal))
+	}
+	if m.All {
+		n += 2
+	}
+	return n
+}
+
+func (m *ExecProcessRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.Stdin)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.Stdout)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.Stderr)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	if m.Terminal {
+		n += 2
+	}
+	if m.Spec != nil {
+		l = m.Spec.Size()
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *ExecProcessResponse) Size() (n int) {
+	var l int
+	_ = l
+	return n
+}
+
+func (m *ResizePtyRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	if m.Width != 0 {
+		n += 1 + sovTasks(uint64(m.Width))
+	}
+	if m.Height != 0 {
+		n += 1 + sovTasks(uint64(m.Height))
+	}
+	return n
+}
+
+func (m *CloseIORequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	if m.Stdin {
+		n += 2
+	}
+	return n
+}
+
+func (m *PauseTaskRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *ResumeTaskRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *ListPidsRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *ListPidsResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Processes) > 0 {
+		for _, e := range m.Processes {
+			l = e.Size()
+			n += 1 + l + sovTasks(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *CheckpointTaskRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.ParentCheckpoint)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	if m.Options != nil {
+		l = m.Options.Size()
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *CheckpointTaskResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Descriptors) > 0 {
+		for _, e := range m.Descriptors {
+			l = e.Size()
+			n += 1 + l + sovTasks(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *UpdateTaskRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	if m.Resources != nil {
+		l = m.Resources.Size()
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *MetricsRequest) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Filters) > 0 {
+		for _, s := range m.Filters {
+			l = len(s)
+			n += 1 + l + sovTasks(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *MetricsResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Metrics) > 0 {
+		for _, e := range m.Metrics {
+			l = e.Size()
+			n += 1 + l + sovTasks(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *WaitRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	l = len(m.ExecID)
+	if l > 0 {
+		n += 1 + l + sovTasks(uint64(l))
+	}
+	return n
+}
+
+func (m *WaitResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.ExitStatus != 0 {
+		n += 1 + sovTasks(uint64(m.ExitStatus))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
+	n += 1 + l + sovTasks(uint64(l))
+	return n
+}
+
+func sovTasks(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozTasks(x uint64) (n int) {
+	return sovTasks(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *CreateTaskRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateTaskRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "containerd_types.Mount", 1) + `,`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
+		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
+		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
+		`Checkpoint:` + strings.Replace(fmt.Sprintf("%v", this.Checkpoint), "Descriptor", "containerd_types2.Descriptor", 1) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CreateTaskResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateTaskResponse{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StartRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StartRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StartResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StartResponse{`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeleteTaskRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeleteTaskRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeleteResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeleteResponse{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeleteProcessRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeleteProcessRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GetRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GetRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *GetResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&GetResponse{`,
+		`Process:` + strings.Replace(fmt.Sprintf("%v", this.Process), "Process", "containerd_v1_types.Process", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListTasksRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListTasksRequest{`,
+		`Filter:` + fmt.Sprintf("%v", this.Filter) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListTasksResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListTasksResponse{`,
+		`Tasks:` + strings.Replace(fmt.Sprintf("%v", this.Tasks), "Process", "containerd_v1_types.Process", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *KillRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&KillRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`Signal:` + fmt.Sprintf("%v", this.Signal) + `,`,
+		`All:` + fmt.Sprintf("%v", this.All) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ExecProcessRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ExecProcessRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
+		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
+		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
+		`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf1.Any", 1) + `,`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ExecProcessResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ExecProcessResponse{`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResizePtyRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResizePtyRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`Width:` + fmt.Sprintf("%v", this.Width) + `,`,
+		`Height:` + fmt.Sprintf("%v", this.Height) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CloseIORequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CloseIORequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PauseTaskRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PauseTaskRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResumeTaskRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResumeTaskRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListPidsRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListPidsRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListPidsResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListPidsResponse{`,
+		`Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "containerd_v1_types.ProcessInfo", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CheckpointTaskRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CheckpointTaskRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`ParentCheckpoint:` + fmt.Sprintf("%v", this.ParentCheckpoint) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CheckpointTaskResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CheckpointTaskResponse{`,
+		`Descriptors:` + strings.Replace(fmt.Sprintf("%v", this.Descriptors), "Descriptor", "containerd_types2.Descriptor", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateTaskRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateTaskRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "google_protobuf1.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MetricsRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&MetricsRequest{`,
+		`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *MetricsResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&MetricsResponse{`,
+		`Metrics:` + strings.Replace(fmt.Sprintf("%v", this.Metrics), "Metric", "containerd_types1.Metric", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WaitRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WaitRequest{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`ExecID:` + fmt.Sprintf("%v", this.ExecID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WaitResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WaitResponse{`,
+		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringTasks(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CreateTaskRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CreateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Rootfs = append(m.Rootfs, &containerd_types.Mount{})
+			if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdin = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdout = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stderr = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Terminal = bool(v != 0)
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Checkpoint == nil {
+				m.Checkpoint = &containerd_types2.Descriptor{}
+			}
+			if err := m.Checkpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Options == nil {
+				m.Options = &google_protobuf1.Any{}
+			}
+			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CreateTaskResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CreateTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StartRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StartRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StartResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StartResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteTaskRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteTaskRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType)
+			}
+			m.ExitStatus = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteProcessRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteProcessRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GetRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GetRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *GetResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: GetResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: GetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Process", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Process == nil {
+				m.Process = &containerd_v1_types.Process{}
+			}
+			if err := m.Process.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListTasksRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListTasksRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListTasksRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Filter = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListTasksResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListTasksResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListTasksResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Tasks", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Tasks = append(m.Tasks, &containerd_v1_types.Process{})
+			if err := m.Tasks[len(m.Tasks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *KillRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: KillRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: KillRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType)
+			}
+			m.Signal = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Signal |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field All", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.All = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExecProcessRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExecProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdin = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdout = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stderr = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Terminal = bool(v != 0)
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Spec == nil {
+				m.Spec = &google_protobuf1.Any{}
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExecProcessResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExecProcessResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResizePtyRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResizePtyRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Width", wireType)
+			}
+			m.Width = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Width |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
+			}
+			m.Height = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Height |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CloseIORequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CloseIORequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CloseIORequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stdin = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PauseTaskRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PauseTaskRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PauseTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResumeTaskRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResumeTaskRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResumeTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListPidsRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListPidsRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListPidsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListPidsResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListPidsResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListPidsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Processes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Processes = append(m.Processes, &containerd_v1_types.ProcessInfo{})
+			if err := m.Processes[len(m.Processes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CheckpointTaskRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CheckpointTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ParentCheckpoint", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ParentCheckpoint = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Options == nil {
+				m.Options = &google_protobuf1.Any{}
+			}
+			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CheckpointTaskResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CheckpointTaskResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CheckpointTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Descriptors", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Descriptors = append(m.Descriptors, &containerd_types2.Descriptor{})
+			if err := m.Descriptors[len(m.Descriptors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UpdateTaskRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UpdateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Resources == nil {
+				m.Resources = &google_protobuf1.Any{}
+			}
+			if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MetricsRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MetricsRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MetricsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MetricsResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MetricsResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MetricsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Metrics = append(m.Metrics, &containerd_types1.Metric{})
+			if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WaitRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WaitRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WaitRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExecID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ExecID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WaitResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WaitResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WaitResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType)
+			}
+			m.ExitStatus = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTasks
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTasks(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTasks
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipTasks(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowTasks
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTasks
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthTasks
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowTasks
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipTasks(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthTasks = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowTasks   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/tasks/v1/tasks.proto", fileDescriptorTasks)
+}
+
+var fileDescriptorTasks = []byte{
+	// 1317 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5b, 0x6f, 0x1b, 0x45,
+	0x1b, 0xee, 0xfa, 0xec, 0xd7, 0x49, 0x9b, 0xec, 0x97, 0xe6, 0x33, 0x4b, 0x15, 0x87, 0xe5, 0xc6,
+	0x04, 0xba, 0x4b, 0x5d, 0x54, 0x21, 0x5a, 0x21, 0x35, 0x07, 0x22, 0x0b, 0xaa, 0xa6, 0xdb, 0x72,
+	0x50, 0x25, 0x14, 0xb6, 0xbb, 0x13, 0x67, 0x14, 0x7b, 0x67, 0xbb, 0x33, 0x4e, 0x1b, 0xb8, 0x80,
+	0x9f, 0xd0, 0x5b, 0x6e, 0xf8, 0x3d, 0xb9, 0xe4, 0x12, 0xa1, 0x2a, 0x50, 0xff, 0x0b, 0xee, 0xd0,
+	0x1c, 0x76, 0xb3, 0xb1, 0x63, 0xaf, 0x93, 0x34, 0xdc, 0xb4, 0x33, 0xb3, 0xef, 0x69, 0x9e, 0x79,
+	0x0f, 0x8f, 0x03, 0xab, 0x1d, 0xcc, 0x76, 0xfb, 0xcf, 0x2c, 0x8f, 0xf4, 0x6c, 0x8f, 0x04, 0xcc,
+	0xc5, 0x01, 0x8a, 0xfc, 0xf4, 0xd2, 0x0d, 0xb1, 0x4d, 0x51, 0xb4, 0x8f, 0x3d, 0x44, 0x6d, 0xe6,
+	0xd2, 0x3d, 0x6a, 0xef, 0xdf, 0x92, 0x0b, 0x2b, 0x8c, 0x08, 0x23, 0xfa, 0x8d, 0x63, 0x69, 0x2b,
+	0x96, 0xb4, 0xa4, 0xc0, 0xfe, 0x2d, 0xe3, 0xdd, 0x0e, 0x21, 0x9d, 0x2e, 0xb2, 0x85, 0xec, 0xb3,
+	0xfe, 0x8e, 0x8d, 0x7a, 0x21, 0x3b, 0x90, 0xaa, 0xc6, 0x3b, 0xc3, 0x1f, 0xdd, 0x20, 0xfe, 0xb4,
+	0xd0, 0x21, 0x1d, 0x22, 0x96, 0x36, 0x5f, 0xa9, 0xd3, 0x3b, 0x53, 0xc5, 0xcb, 0x0e, 0x42, 0x44,
+	0xed, 0x1e, 0xe9, 0x07, 0x4c, 0xe9, 0x7d, 0x7a, 0x16, 0x3d, 0xc4, 0x22, 0xec, 0xa9, 0xdb, 0x19,
+	0x77, 0xcf, 0xa0, 0xe9, 0x23, 0xea, 0x45, 0x38, 0x64, 0x24, 0x52, 0xca, 0x9f, 0x9d, 0x41, 0x99,
+	0x23, 0x26, 0xfe, 0x51, 0xba, 0x8d, 0x61, 0x6c, 0x18, 0xee, 0x21, 0xca, 0xdc, 0x5e, 0x28, 0x05,
+	0xcc, 0xc3, 0x1c, 0xcc, 0xaf, 0x45, 0xc8, 0x65, 0xe8, 0x89, 0x4b, 0xf7, 0x1c, 0xf4, 0xbc, 0x8f,
+	0x28, 0xd3, 0x5b, 0x30, 0x93, 0x98, 0xdf, 0xc6, 0x7e, 0x5d, 0x5b, 0xd6, 0x9a, 0xd5, 0xd5, 0x6b,
+	0x83, 0xa3, 0x46, 0x6d, 0x2d, 0x3e, 0x6f, 0xaf, 0x3b, 0xb5, 0x44, 0xa8, 0xed, 0xeb, 0x36, 0x94,
+	0x22, 0x42, 0xd8, 0x0e, 0xad, 0xe7, 0x97, 0xf3, 0xcd, 0x5a, 0xeb, 0xff, 0x56, 0xea, 0x49, 0x45,
+	0x74, 0xd6, 0x03, 0x0e, 0xa6, 0xa3, 0xc4, 0xf4, 0x05, 0x28, 0x52, 0xe6, 0xe3, 0xa0, 0x5e, 0xe0,
+	0xd6, 0x1d, 0xb9, 0xd1, 0x17, 0xa1, 0x44, 0x99, 0x4f, 0xfa, 0xac, 0x5e, 0x14, 0xc7, 0x6a, 0xa7,
+	0xce, 0x51, 0x14, 0xd5, 0x4b, 0xc9, 0x39, 0x8a, 0x22, 0xdd, 0x80, 0x0a, 0x43, 0x51, 0x0f, 0x07,
+	0x6e, 0xb7, 0x5e, 0x5e, 0xd6, 0x9a, 0x15, 0x27, 0xd9, 0xeb, 0xf7, 0x00, 0xbc, 0x5d, 0xe4, 0xed,
+	0x85, 0x04, 0x07, 0xac, 0x5e, 0x59, 0xd6, 0x9a, 0xb5, 0xd6, 0x8d, 0xd1, 0xb0, 0xd6, 0x13, 0xc4,
+	0x9d, 0x94, 0xbc, 0x6e, 0x41, 0x99, 0x84, 0x0c, 0x93, 0x80, 0xd6, 0xab, 0x42, 0x75, 0xc1, 0x92,
+	0x68, 0x5a, 0x31, 0x9a, 0xd6, 0xfd, 0xe0, 0xc0, 0x89, 0x85, 0xcc, 0xa7, 0xa0, 0xa7, 0x91, 0xa4,
+	0x21, 0x09, 0x28, 0x3a, 0x17, 0x94, 0x73, 0x90, 0x0f, 0xb1, 0x5f, 0xcf, 0x2d, 0x6b, 0xcd, 0x59,
+	0x87, 0x2f, 0xcd, 0x0e, 0xcc, 0x3c, 0x66, 0x6e, 0xc4, 0x2e, 0xf2, 0x40, 0xef, 0x43, 0x19, 0xbd,
+	0x44, 0xde, 0xb6, 0xb2, 0x5c, 0x5d, 0x85, 0xc1, 0x51, 0xa3, 0xb4, 0xf1, 0x12, 0x79, 0xed, 0x75,
+	0xa7, 0xc4, 0x3f, 0xb5, 0x7d, 0xf3, 0x3d, 0x98, 0x55, 0x8e, 0x54, 0xfc, 0x2a, 0x16, 0xed, 0x38,
+	0x96, 0x4d, 0x98, 0x5f, 0x47, 0x5d, 0x74, 0xe1, 0x8c, 0x31, 0x7f, 0xd3, 0xe0, 0xaa, 0xb4, 0x94,
+	0x78, 0x5b, 0x84, 0x5c, 0xa2, 0x5c, 0x1a, 0x1c, 0x35, 0x72, 0xed, 0x75, 0x27, 0x87, 0x4f, 0x41,
+	0x44, 0x6f, 0x40, 0x0d, 0xbd, 0xc4, 0x6c, 0x9b, 0x32, 0x97, 0xf5, 0x79, 0xce, 0xf1, 0x2f, 0xc0,
+	0x8f, 0x1e, 0x8b, 0x13, 0xfd, 0x3e, 0x54, 0xf9, 0x0e, 0xf9, 0xdb, 0x2e, 0x13, 0x29, 0x56, 0x6b,
+	0x19, 0x23, 0x0f, 0xf8, 0x24, 0x2e, 0x87, 0xd5, 0xca, 0xe1, 0x51, 0xe3, 0xca, 0xab, 0xbf, 0x1a,
+	0x9a, 0x53, 0x91, 0x6a, 0xf7, 0x99, 0x49, 0x60, 0x41, 0xc6, 0xb7, 0x15, 0x11, 0x0f, 0x51, 0x7a,
+	0xe9, 0xe8, 0x23, 0x80, 0x4d, 0x74, 0xf9, 0x8f, 0xbc, 0x01, 0x35, 0xe1, 0x46, 0x81, 0x7e, 0x07,
+	0xca, 0xa1, 0xbc, 0xa0, 0x70, 0x31, 0x54, 0x23, 0xfb, 0xb7, 0x54, 0x99, 0xc4, 0x20, 0xc4, 0xc2,
+	0xe6, 0x0a, 0xcc, 0x7d, 0x85, 0x29, 0xe3, 0x69, 0x90, 0x40, 0xb3, 0x08, 0xa5, 0x1d, 0xdc, 0x65,
+	0x28, 0x92, 0xd1, 0x3a, 0x6a, 0xc7, 0x93, 0x26, 0x25, 0x9b, 0xd4, 0x46, 0x51, 0xb4, 0xf8, 0xba,
+	0x26, 0x3a, 0xc6, 0x64, 0xb7, 0x52, 0xd4, 0x7c, 0xa5, 0x41, 0xed, 0x4b, 0xdc, 0xed, 0x5e, 0x36,
+	0x48, 0xa2, 0xe1, 0xe0, 0x0e, 0x6f, 0x2b, 0x32, 0xb7, 0xd4, 0x8e, 0xa7, 0xa2, 0xdb, 0xed, 0x8a,
+	0x8c, 0xaa, 0x38, 0x7c, 0x69, 0xfe, 0xa3, 0x81, 0xce, 0x95, 0xdf, 0x42, 0x96, 0x24, 0x3d, 0x31,
+	0x77, 0x7a, 0x4f, 0xcc, 0x8f, 0xe9, 0x89, 0x85, 0xb1, 0x3d, 0xb1, 0x38, 0xd4, 0x13, 0x9b, 0x50,
+	0xa0, 0x21, 0xf2, 0x44, 0x17, 0x1d, 0xd7, 0xd2, 0x84, 0x44, 0x1a, 0xa5, 0xf2, 0xd8, 0x54, 0xba,
+	0x0e, 0xff, 0x3b, 0x71, 0x75, 0xf9, 0xb2, 0xe6, 0xaf, 0x1a, 0xcc, 0x39, 0x88, 0xe2, 0x1f, 0xd1,
+	0x16, 0x3b, 0xb8, 0xf4, 0xa7, 0x5a, 0x80, 0xe2, 0x0b, 0xec, 0xb3, 0x5d, 0xf5, 0x52, 0x72, 0xc3,
+	0xd1, 0xd9, 0x45, 0xb8, 0xb3, 0x2b, 0xab, 0x7f, 0xd6, 0x51, 0x3b, 0xf3, 0x67, 0xb8, 0xba, 0xd6,
+	0x25, 0x14, 0xb5, 0x1f, 0xfe, 0x17, 0x81, 0xc9, 0xe7, 0xcc, 0x8b, 0x57, 0x90, 0x1b, 0xf3, 0x0b,
+	0x98, 0xdb, 0x72, 0xfb, 0xf4, 0xc2, 0xfd, 0x73, 0x13, 0xe6, 0x1d, 0x44, 0xfb, 0xbd, 0x0b, 0x1b,
+	0xda, 0x80, 0x6b, 0xbc, 0x38, 0xb7, 0xb0, 0x7f, 0x91, 0xe4, 0x35, 0x1d, 0xd9, 0x0f, 0xa4, 0x19,
+	0x55, 0xe2, 0x9f, 0x43, 0x55, 0xb5, 0x0b, 0x14, 0x97, 0xf9, 0xf2, 0xa4, 0x32, 0x6f, 0x07, 0x3b,
+	0xc4, 0x39, 0x56, 0x31, 0x5f, 0x6b, 0x70, 0x7d, 0x2d, 0x99, 0xc9, 0x17, 0xe5, 0x28, 0xdb, 0x30,
+	0x1f, 0xba, 0x11, 0x0a, 0xd8, 0x76, 0x8a, 0x17, 0xc8, 0xe7, 0x6b, 0xf1, 0xfe, 0xff, 0xe7, 0x51,
+	0x63, 0x25, 0xc5, 0xb6, 0x48, 0x88, 0x82, 0x44, 0x9d, 0xda, 0x1d, 0x72, 0xd3, 0xc7, 0x1d, 0x44,
+	0x99, 0xb5, 0x2e, 0xfe, 0x73, 0xe6, 0xa4, 0xb1, 0xb5, 0x53, 0x39, 0x43, 0x7e, 0x1a, 0xce, 0xf0,
+	0x1d, 0x2c, 0x0e, 0xdf, 0x2e, 0x01, 0xae, 0x76, 0xcc, 0x04, 0x4f, 0xed, 0x90, 0x23, 0xe4, 0x25,
+	0xad, 0x60, 0xfe, 0x04, 0xf3, 0x5f, 0x87, 0xfe, 0x5b, 0xe0, 0x75, 0x2d, 0xa8, 0x46, 0x88, 0x92,
+	0x7e, 0xe4, 0x21, 0x2a, 0xb0, 0x1a, 0x77, 0xa9, 0x63, 0x31, 0x73, 0x05, 0xae, 0x3e, 0x90, 0x04,
+	0x38, 0xf6, 0x5c, 0x87, 0xb2, 0x9c, 0x04, 0xf2, 0x2a, 0x55, 0x27, 0xde, 0xf2, 0xe4, 0x4b, 0x64,
+	0x93, 0xb9, 0x50, 0x56, 0xfc, 0x59, 0xdd, 0xbb, 0x7e, 0x0a, 0x97, 0x14, 0x02, 0x4e, 0x2c, 0x68,
+	0xee, 0x40, 0xed, 0x5b, 0x17, 0x5f, 0xfe, 0xec, 0x8c, 0x60, 0x46, 0xfa, 0x51, 0xb1, 0x0e, 0xf1,
+	0x10, 0x6d, 0x32, 0x0f, 0xc9, 0x9d, 0x87, 0x87, 0xb4, 0x5e, 0xcf, 0x40, 0x51, 0x4c, 0x4e, 0x7d,
+	0x0f, 0x4a, 0x92, 0x63, 0xea, 0xb6, 0x35, 0xe9, 0x17, 0x93, 0x35, 0xc2, 0xe9, 0x8d, 0x8f, 0xa7,
+	0x57, 0x50, 0x57, 0xfb, 0x01, 0x8a, 0x82, 0x0b, 0xea, 0x2b, 0x93, 0x55, 0xd3, 0xcc, 0xd4, 0xf8,
+	0x70, 0x2a, 0x59, 0xe5, 0xa1, 0x03, 0x25, 0x49, 0xb0, 0xb2, 0xae, 0x33, 0x42, 0x38, 0x8d, 0x8f,
+	0xa6, 0x51, 0x48, 0x1c, 0x3d, 0x87, 0xd9, 0x13, 0x4c, 0x4e, 0x6f, 0x4d, 0xa3, 0x7e, 0x72, 0xa0,
+	0x9f, 0xd1, 0xe5, 0x53, 0xc8, 0x6f, 0x22, 0xa6, 0x37, 0x27, 0x2b, 0x1d, 0xd3, 0x3d, 0xe3, 0x83,
+	0x29, 0x24, 0x13, 0xdc, 0x0a, 0xbc, 0xd3, 0xea, 0xd6, 0x64, 0x95, 0x61, 0x76, 0x66, 0xd8, 0x53,
+	0xcb, 0x2b, 0x47, 0x6d, 0x28, 0x70, 0xb2, 0xa5, 0x67, 0xc4, 0x96, 0x22, 0x64, 0xc6, 0xe2, 0x48,
+	0x72, 0x6f, 0xf0, 0x1f, 0xeb, 0xfa, 0x16, 0x14, 0x78, 0x29, 0xe9, 0x19, 0x79, 0x38, 0x4a, 0xa4,
+	0xc6, 0x5a, 0x7c, 0x0c, 0xd5, 0x84, 0x63, 0x64, 0x41, 0x31, 0x4c, 0x46, 0xc6, 0x1a, 0x7d, 0x08,
+	0x65, 0xc5, 0x0e, 0xf4, 0x8c, 0xf7, 0x3e, 0x49, 0x22, 0x26, 0x18, 0x2c, 0x8a, 0x69, 0x9f, 0x15,
+	0xe1, 0x30, 0x25, 0x18, 0x6b, 0xf0, 0x11, 0x94, 0xe4, 0xd8, 0xcf, 0x2a, 0x9a, 0x11, 0x72, 0x30,
+	0xd6, 0x24, 0x86, 0x4a, 0x3c, 0xb9, 0xf5, 0x9b, 0xd9, 0x39, 0x92, 0x22, 0x0a, 0x86, 0x35, 0xad,
+	0xb8, 0xca, 0xa8, 0x17, 0x00, 0xa9, 0x79, 0x79, 0x3b, 0x03, 0xe2, 0xd3, 0x26, 0xbf, 0xf1, 0xc9,
+	0xd9, 0x94, 0x94, 0xe3, 0x47, 0x50, 0x92, 0x03, 0x31, 0x0b, 0xb6, 0x91, 0xb1, 0x39, 0x16, 0xb6,
+	0x1d, 0x28, 0xab, 0xd1, 0x95, 0x95, 0x2b, 0x27, 0xa7, 0xa1, 0x71, 0x73, 0x4a, 0x69, 0x15, 0xfa,
+	0xf7, 0x50, 0xe0, 0x33, 0x27, 0xab, 0x0a, 0x53, 0xf3, 0xcf, 0x58, 0x99, 0x46, 0x54, 0x9a, 0x5f,
+	0xfd, 0xe6, 0xf0, 0xcd, 0xd2, 0x95, 0x3f, 0xde, 0x2c, 0x5d, 0xf9, 0x65, 0xb0, 0xa4, 0x1d, 0x0e,
+	0x96, 0xb4, 0xdf, 0x07, 0x4b, 0xda, 0xdf, 0x83, 0x25, 0xed, 0xe9, 0xbd, 0xf3, 0xfd, 0x65, 0xef,
+	0xae, 0x58, 0x3c, 0x2b, 0x09, 0xb8, 0x6e, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x67, 0xc5, 0x63,
+	0x32, 0x20, 0x14, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
new file mode 100644
index 0000000..eb37318
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/tasks/v1/tasks.proto
@@ -0,0 +1,209 @@
+syntax = "proto3";
+
+package containerd.services.tasks.v1;
+
+import "google/protobuf/empty.proto";
+import "google/protobuf/any.proto";
+import "gogoproto/gogo.proto";
+import "github.com/containerd/containerd/api/types/mount.proto";
+import "github.com/containerd/containerd/api/types/metrics.proto";
+import "github.com/containerd/containerd/api/types/descriptor.proto";
+import "github.com/containerd/containerd/api/types/task/task.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/containerd/containerd/api/services/tasks/v1;tasks";
+
+service Tasks {
+	// Create a task.
+	rpc Create(CreateTaskRequest) returns (CreateTaskResponse);
+
+	// Start a process.
+	rpc Start(StartRequest) returns (StartResponse);
+
+	// Delete a task and on disk state.
+	rpc Delete(DeleteTaskRequest) returns (DeleteResponse);
+
+	rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);
+
+	rpc Get(GetRequest) returns (GetResponse);
+
+	rpc List(ListTasksRequest) returns (ListTasksResponse);
+
+	// Kill a task or process.
+	rpc Kill(KillRequest) returns (google.protobuf.Empty);
+
+	rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty);
+
+	rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);
+
+	rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);
+
+	rpc Pause(PauseTaskRequest) returns (google.protobuf.Empty);
+
+	rpc Resume(ResumeTaskRequest) returns (google.protobuf.Empty);
+
+	rpc ListPids(ListPidsRequest) returns (ListPidsResponse);
+
+	rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse);
+
+	rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);
+
+	rpc Metrics(MetricsRequest) returns (MetricsResponse);
+
+	rpc Wait(WaitRequest) returns (WaitResponse);
+}
+
+message CreateTaskRequest {
+	string container_id = 1;
+
+	// RootFS provides the pre-chroot mounts to perform in the shim before
+	// executing the container task.
+	//
+	// These are for mounts that cannot be performed in the user namespace.
+	// Typically, these mounts should be resolved from snapshots specified on
+	// the container object.
+	repeated containerd.types.Mount rootfs = 3;
+
+	string stdin = 4;
+	string stdout = 5;
+	string stderr = 6;
+	bool terminal = 7;
+
+	containerd.types.Descriptor checkpoint = 8;
+
+	google.protobuf.Any options = 9;
+}
+
+message CreateTaskResponse {
+	string container_id = 1;
+	uint32 pid = 2;
+}
+
+message StartRequest {
+	string container_id = 1;
+	string exec_id = 2;
+}
+
+message StartResponse {
+	uint32 pid = 1;
+}
+
+message DeleteTaskRequest {
+	string container_id = 1;
+}
+
+message DeleteResponse {
+	string id = 1;
+	uint32 pid = 2;
+	uint32 exit_status = 3;
+	google.protobuf.Timestamp exited_at = 4 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message DeleteProcessRequest {
+	string container_id = 1;
+	string exec_id = 2;
+}
+
+message GetRequest {
+	string container_id = 1;
+	string exec_id = 2;
+}
+
+message GetResponse {
+	containerd.v1.types.Process process = 1;
+}
+
+message ListTasksRequest {
+	string filter = 1;
+}
+
+message ListTasksResponse {
+	repeated containerd.v1.types.Process tasks = 1;
+}
+
+message KillRequest {
+	string container_id = 1;
+	string exec_id = 2;
+	uint32 signal = 3;
+	bool all = 4;
+}
+
+message ExecProcessRequest {
+	string container_id = 1;
+	string stdin = 2;
+	string stdout = 3;
+	string stderr = 4;
+	bool terminal = 5;
+	// Spec for starting a process in the target container.
+	//
+	// For runc, this is a process spec, for example.
+	google.protobuf.Any spec = 6;
+	// id of the exec process
+	string exec_id = 7;
+}
+
+message ExecProcessResponse {
+}
+
+message ResizePtyRequest {
+	string container_id = 1;
+	string exec_id = 2;
+	uint32 width = 3;
+	uint32 height = 4;
+}
+
+message CloseIORequest {
+	string container_id = 1;
+	string exec_id = 2;
+	bool stdin = 3;
+}
+
+message PauseTaskRequest {
+	string container_id = 1;
+}
+
+message ResumeTaskRequest {
+	string container_id = 1;
+}
+
+message ListPidsRequest {
+	string container_id = 1;
+}
+
+message ListPidsResponse {
+	// Processes includes the process ID and additional process information
+	repeated containerd.v1.types.ProcessInfo processes = 1;
+}
+
+message CheckpointTaskRequest {
+	string container_id = 1;
+	string parent_checkpoint = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	google.protobuf.Any options = 3;
+}
+
+message CheckpointTaskResponse {
+	repeated containerd.types.Descriptor descriptors = 1;
+}
+
+message UpdateTaskRequest {
+	string container_id = 1;
+	google.protobuf.Any resources = 2;
+}
+
+message MetricsRequest {
+	repeated string filters = 1;
+}
+
+message MetricsResponse {
+	repeated types.Metric metrics = 1;
+}
+
+message WaitRequest {
+	string container_id = 1;
+	string exec_id = 2;
+}
+
+message WaitResponse {
+	uint32 exit_status = 1;
+	google.protobuf.Timestamp exited_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go
new file mode 100644
index 0000000..c403c84
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.pb.go
@@ -0,0 +1,466 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/services/version/v1/version.proto
+// DO NOT EDIT!
+
+/*
+	Package version is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/services/version/v1/version.proto
+
+	It has these top-level messages:
+		VersionResponse
+*/
+package version
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/ptypes/empty"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type VersionResponse struct {
+	Version  string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+	Revision string `protobuf:"bytes,2,opt,name=revision,proto3" json:"revision,omitempty"`
+}
+
+func (m *VersionResponse) Reset()                    { *m = VersionResponse{} }
+func (*VersionResponse) ProtoMessage()               {}
+func (*VersionResponse) Descriptor() ([]byte, []int) { return fileDescriptorVersion, []int{0} }
+
+func init() {
+	proto.RegisterType((*VersionResponse)(nil), "containerd.services.version.v1.VersionResponse")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Version service
+
+type VersionClient interface {
+	Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error)
+}
+
+type versionClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewVersionClient(cc *grpc.ClientConn) VersionClient {
+	return &versionClient{cc}
+}
+
+func (c *versionClient) Version(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*VersionResponse, error) {
+	out := new(VersionResponse)
+	err := grpc.Invoke(ctx, "/containerd.services.version.v1.Version/Version", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for Version service
+
+type VersionServer interface {
+	Version(context.Context, *google_protobuf.Empty) (*VersionResponse, error)
+}
+
+func RegisterVersionServer(s *grpc.Server, srv VersionServer) {
+	s.RegisterService(&_Version_serviceDesc, srv)
+}
+
+func _Version_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(google_protobuf.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VersionServer).Version(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.services.version.v1.Version/Version",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VersionServer).Version(ctx, req.(*google_protobuf.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Version_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "containerd.services.version.v1.Version",
+	HandlerType: (*VersionServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Version",
+			Handler:    _Version_Version_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/containerd/containerd/api/services/version/v1/version.proto",
+}
+
+func (m *VersionResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *VersionResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Version) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintVersion(dAtA, i, uint64(len(m.Version)))
+		i += copy(dAtA[i:], m.Version)
+	}
+	if len(m.Revision) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintVersion(dAtA, i, uint64(len(m.Revision)))
+		i += copy(dAtA[i:], m.Revision)
+	}
+	return i, nil
+}
+
+func encodeFixed64Version(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Version(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintVersion(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *VersionResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Version)
+	if l > 0 {
+		n += 1 + l + sovVersion(uint64(l))
+	}
+	l = len(m.Revision)
+	if l > 0 {
+		n += 1 + l + sovVersion(uint64(l))
+	}
+	return n
+}
+
+func sovVersion(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozVersion(x uint64) (n int) {
+	return sovVersion(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *VersionResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&VersionResponse{`,
+		`Version:` + fmt.Sprintf("%v", this.Version) + `,`,
+		`Revision:` + fmt.Sprintf("%v", this.Revision) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringVersion(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *VersionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowVersion
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: VersionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: VersionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowVersion
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthVersion
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowVersion
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthVersion
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Revision = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipVersion(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthVersion
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipVersion(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowVersion
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowVersion
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowVersion
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthVersion
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowVersion
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipVersion(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthVersion = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowVersion   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/services/version/v1/version.proto", fileDescriptorVersion)
+}
+
+var fileDescriptorVersion = []byte{
+	// 241 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4b, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x17, 0xa7, 0x16, 0x95, 0x65, 0x26, 0xa7, 0x16, 0xeb,
+	0x97, 0xa5, 0x16, 0x15, 0x67, 0xe6, 0xe7, 0xe9, 0x97, 0x19, 0xc2, 0x98, 0x7a, 0x05, 0x45, 0xf9,
+	0x25, 0xf9, 0x42, 0x72, 0x08, 0x1d, 0x7a, 0x30, 0xd5, 0x7a, 0x30, 0x25, 0x65, 0x86, 0x52, 0xd2,
+	0xe9, 0xf9, 0xf9, 0xe9, 0x39, 0xa9, 0xfa, 0x60, 0xd5, 0x49, 0xa5, 0x69, 0xfa, 0xa9, 0xb9, 0x05,
+	0x25, 0x95, 0x10, 0xcd, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0xa6, 0x3e, 0x88, 0x05, 0x11,
+	0x55, 0x72, 0xe7, 0xe2, 0x0f, 0x83, 0x18, 0x10, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a,
+	0x24, 0xc1, 0xc5, 0x0e, 0x35, 0x53, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, 0x08, 0xc6, 0x15, 0x92,
+	0xe2, 0xe2, 0x28, 0x4a, 0x2d, 0xcb, 0x04, 0x4b, 0x31, 0x81, 0xa5, 0xe0, 0x7c, 0xa3, 0x58, 0x2e,
+	0x76, 0xa8, 0x41, 0x42, 0x41, 0x08, 0xa6, 0x98, 0x1e, 0xc4, 0x49, 0x7a, 0x30, 0x27, 0xe9, 0xb9,
+	0x82, 0x9c, 0x24, 0xa5, 0xaf, 0x87, 0xdf, 0x2b, 0x7a, 0x68, 0x8e, 0x72, 0x8a, 0x3a, 0xf1, 0x50,
+	0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72, 0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78,
+	0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x03, 0xb9, 0x81, 0x6b, 0x0d, 0x65, 0x26, 0xb1,
+	0x81, 0x1d, 0x67, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xb6, 0x37, 0xd8, 0xc6, 0xa7, 0x01, 0x00,
+	0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto b/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto
new file mode 100644
index 0000000..2398fdc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/services/version/v1/version.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+
+package containerd.services.version.v1;
+
+import "google/protobuf/empty.proto";
+import "gogoproto/gogo.proto";
+
+// TODO(stevvooe): Should version service actually be versioned?
+option go_package = "github.com/containerd/containerd/api/services/version/v1;version";
+
+service Version {
+	rpc Version(google.protobuf.Empty) returns (VersionResponse);
+}
+
+message VersionResponse {
+	string version = 1;
+	string revision = 2;
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go
new file mode 100644
index 0000000..785d050
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/descriptor.pb.go
@@ -0,0 +1,428 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/types/descriptor.proto
+// DO NOT EDIT!
+
+/*
+	Package types is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/types/descriptor.proto
+		github.com/containerd/containerd/api/types/metrics.proto
+		github.com/containerd/containerd/api/types/mount.proto
+		github.com/containerd/containerd/api/types/platform.proto
+
+	It has these top-level messages:
+		Descriptor
+		Metric
+		Mount
+		Platform
+*/
+package types
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// Descriptor describes a blob in a content store.
+//
+// This descriptor can be used to reference content from an
+// oci descriptor found in a manifest.
+// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor
+type Descriptor struct {
+	MediaType string                                     `protobuf:"bytes,1,opt,name=media_type,json=mediaType,proto3" json:"media_type,omitempty"`
+	Digest    github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
+	Size_     int64                                      `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"`
+}
+
+func (m *Descriptor) Reset()                    { *m = Descriptor{} }
+func (*Descriptor) ProtoMessage()               {}
+func (*Descriptor) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{0} }
+
+func init() {
+	proto.RegisterType((*Descriptor)(nil), "containerd.types.Descriptor")
+}
+func (m *Descriptor) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Descriptor) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.MediaType) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintDescriptor(dAtA, i, uint64(len(m.MediaType)))
+		i += copy(dAtA[i:], m.MediaType)
+	}
+	if len(m.Digest) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintDescriptor(dAtA, i, uint64(len(m.Digest)))
+		i += copy(dAtA[i:], m.Digest)
+	}
+	if m.Size_ != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintDescriptor(dAtA, i, uint64(m.Size_))
+	}
+	return i, nil
+}
+
+func encodeFixed64Descriptor(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Descriptor(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintDescriptor(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Descriptor) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.MediaType)
+	if l > 0 {
+		n += 1 + l + sovDescriptor(uint64(l))
+	}
+	l = len(m.Digest)
+	if l > 0 {
+		n += 1 + l + sovDescriptor(uint64(l))
+	}
+	if m.Size_ != 0 {
+		n += 1 + sovDescriptor(uint64(m.Size_))
+	}
+	return n
+}
+
+func sovDescriptor(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozDescriptor(x uint64) (n int) {
+	return sovDescriptor(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Descriptor) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Descriptor{`,
+		`MediaType:` + fmt.Sprintf("%v", this.MediaType) + `,`,
+		`Digest:` + fmt.Sprintf("%v", this.Digest) + `,`,
+		`Size_:` + fmt.Sprintf("%v", this.Size_) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringDescriptor(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Descriptor) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowDescriptor
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Descriptor: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Descriptor: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MediaType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDescriptor
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthDescriptor
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.MediaType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDescriptor
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthDescriptor
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType)
+			}
+			m.Size_ = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowDescriptor
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Size_ |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipDescriptor(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthDescriptor
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipDescriptor(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowDescriptor
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowDescriptor
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowDescriptor
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthDescriptor
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowDescriptor
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipDescriptor(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthDescriptor = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowDescriptor   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/types/descriptor.proto", fileDescriptorDescriptor)
+}
+
+var fileDescriptorDescriptor = []byte{
+	// 232 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xa7, 0xa4, 0x16,
+	0x27, 0x17, 0x65, 0x16, 0x94, 0xe4, 0x17, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20,
+	0x94, 0xe9, 0x81, 0x95, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88,
+	0x3a, 0xa5, 0x6e, 0x46, 0x2e, 0x2e, 0x17, 0xb8, 0x66, 0x21, 0x59, 0x2e, 0xae, 0xdc, 0xd4, 0x94,
+	0xcc, 0xc4, 0x78, 0x90, 0x1e, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x4e, 0xb0, 0x48, 0x48,
+	0x65, 0x41, 0xaa, 0x90, 0x17, 0x17, 0x5b, 0x4a, 0x66, 0x7a, 0x6a, 0x71, 0x89, 0x04, 0x13, 0x48,
+	0xca, 0xc9, 0xe8, 0xc4, 0x3d, 0x79, 0x86, 0x5b, 0xf7, 0xe4, 0xb5, 0x90, 0x9c, 0x9a, 0x5f, 0x90,
+	0x9a, 0x07, 0xb7, 0xbc, 0x58, 0x3f, 0x3d, 0x5f, 0x17, 0xa2, 0x45, 0xcf, 0x05, 0x4c, 0x05, 0x41,
+	0x4d, 0x10, 0x12, 0xe2, 0x62, 0x29, 0xce, 0xac, 0x4a, 0x95, 0x60, 0x56, 0x60, 0xd4, 0x60, 0x0e,
+	0x02, 0xb3, 0x9d, 0xbc, 0x4e, 0x3c, 0x94, 0x63, 0xb8, 0xf1, 0x50, 0x8e, 0xa1, 0xe1, 0x91, 0x1c,
+	0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x18, 0x65, 0x40,
+	0x7c, 0x60, 0x58, 0x83, 0xc9, 0x24, 0x36, 0xb0, 0x07, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff,
+	0x23, 0x14, 0xc9, 0x7c, 0x47, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/descriptor.proto b/vendor/github.com/containerd/containerd/api/types/descriptor.proto
new file mode 100644
index 0000000..7975ab0
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/descriptor.proto
@@ -0,0 +1,18 @@
+syntax = "proto3";
+
+package containerd.types;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/containerd/containerd/api/types;types";
+
+// Descriptor describes a blob in a content store.
+//
+// This descriptor can be used to reference content from an
+// oci descriptor found in a manifest.
+// See https://godoc.org/github.com/opencontainers/image-spec/specs-go/v1#Descriptor
+message Descriptor {
+	string media_type = 1;
+	string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
+	int64 size = 3;
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/doc.go b/vendor/github.com/containerd/containerd/api/types/doc.go
new file mode 100644
index 0000000..ab1254f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/doc.go
@@ -0,0 +1 @@
+package types
diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.pb.go b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go
new file mode 100644
index 0000000..f9aacf9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/metrics.pb.go
@@ -0,0 +1,429 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/types/metrics.proto
+// DO NOT EDIT!
+
+package types
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import google_protobuf1 "github.com/gogo/protobuf/types"
+import _ "github.com/gogo/protobuf/types"
+
+import time "time"
+
+import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+type Metric struct {
+	Timestamp time.Time             `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"`
+	ID        string                `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+	Data      *google_protobuf1.Any `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
+}
+
+func (m *Metric) Reset()                    { *m = Metric{} }
+func (*Metric) ProtoMessage()               {}
+func (*Metric) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} }
+
+func init() {
+	proto.RegisterType((*Metric)(nil), "containerd.types.Metric")
+}
+func (m *Metric) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Metric) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintMetrics(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
+	n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n1
+	if len(m.ID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if m.Data != nil {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintMetrics(dAtA, i, uint64(m.Data.Size()))
+		n2, err := m.Data.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n2
+	}
+	return i, nil
+}
+
+func encodeFixed64Metrics(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Metrics(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Metric) Size() (n int) {
+	var l int
+	_ = l
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
+	n += 1 + l + sovMetrics(uint64(l))
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovMetrics(uint64(l))
+	}
+	if m.Data != nil {
+		l = m.Data.Size()
+		n += 1 + l + sovMetrics(uint64(l))
+	}
+	return n
+}
+
+func sovMetrics(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozMetrics(x uint64) (n int) {
+	return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Metric) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Metric{`,
+		`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "google_protobuf1.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringMetrics(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Metric) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Metric: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Data == nil {
+				m.Data = &google_protobuf1.Any{}
+			}
+			if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMetrics(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMetrics
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipMetrics(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowMetrics
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowMetrics
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthMetrics
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowMetrics
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipMetrics(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowMetrics   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptorMetrics)
+}
+
+var fileDescriptorMetrics = []byte{
+	// 256 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96,
+	0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81,
+	0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64,
+	0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09,
+	0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x80, 0x28, 0x50,
+	0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54,
+	0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 0x81, 0xa9, 0x70,
+	0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e,
+	0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c,
+	0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1,
+	0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c,
+	0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48,
+	0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x26, 0xb1, 0x81,
+	0xcd, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x51, 0x36, 0x74, 0x83, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/metrics.proto b/vendor/github.com/containerd/containerd/api/types/metrics.proto
new file mode 100644
index 0000000..d1629c7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/metrics.proto
@@ -0,0 +1,15 @@
+syntax = "proto3";
+
+package containerd.types;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/any.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/containerd/containerd/api/types;types";
+
+message Metric {
+	google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	string id = 2;
+	google.protobuf.Any data = 3;
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/mount.pb.go b/vendor/github.com/containerd/containerd/api/types/mount.pb.go
new file mode 100644
index 0000000..cc83514
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/mount.pb.go
@@ -0,0 +1,474 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/types/mount.proto
+// DO NOT EDIT!
+
+package types
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Mount describes mounts for a container.
+//
+// This type is the lingua franca of ContainerD. All services provide mounts
+// to be used with the container at creation time.
+//
+// The Mount type follows the structure of the mount syscall, including a type,
+// source, target and options.
+type Mount struct {
+	// Type defines the nature of the mount.
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	// Source specifies the name of the mount. Depending on mount type, this
+	// may be a volume name or a host path, or even ignored.
+	Source string `protobuf:"bytes,2,opt,name=source,proto3" json:"source,omitempty"`
+	// Target path in container
+	Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
+	// Options specifies zero or more fstab style mount options.
+	Options []string `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"`
+}
+
+func (m *Mount) Reset()                    { *m = Mount{} }
+func (*Mount) ProtoMessage()               {}
+func (*Mount) Descriptor() ([]byte, []int) { return fileDescriptorMount, []int{0} }
+
+func init() {
+	proto.RegisterType((*Mount)(nil), "containerd.types.Mount")
+}
+func (m *Mount) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Mount) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Type) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintMount(dAtA, i, uint64(len(m.Type)))
+		i += copy(dAtA[i:], m.Type)
+	}
+	if len(m.Source) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintMount(dAtA, i, uint64(len(m.Source)))
+		i += copy(dAtA[i:], m.Source)
+	}
+	if len(m.Target) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintMount(dAtA, i, uint64(len(m.Target)))
+		i += copy(dAtA[i:], m.Target)
+	}
+	if len(m.Options) > 0 {
+		for _, s := range m.Options {
+			dAtA[i] = 0x22
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	return i, nil
+}
+
+func encodeFixed64Mount(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Mount(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintMount(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Mount) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Type)
+	if l > 0 {
+		n += 1 + l + sovMount(uint64(l))
+	}
+	l = len(m.Source)
+	if l > 0 {
+		n += 1 + l + sovMount(uint64(l))
+	}
+	l = len(m.Target)
+	if l > 0 {
+		n += 1 + l + sovMount(uint64(l))
+	}
+	if len(m.Options) > 0 {
+		for _, s := range m.Options {
+			l = len(s)
+			n += 1 + l + sovMount(uint64(l))
+		}
+	}
+	return n
+}
+
+func sovMount(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozMount(x uint64) (n int) {
+	return sovMount(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Mount) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Mount{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Source:` + fmt.Sprintf("%v", this.Source) + `,`,
+		`Target:` + fmt.Sprintf("%v", this.Target) + `,`,
+		`Options:` + fmt.Sprintf("%v", this.Options) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringMount(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Mount) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowMount
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Mount: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMount
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMount
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMount
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMount
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Source = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMount
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMount
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Target = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowMount
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthMount
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Options = append(m.Options, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipMount(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthMount
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipMount(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowMount
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowMount
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowMount
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthMount
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowMount
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipMount(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthMount = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowMount   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/types/mount.proto", fileDescriptorMount)
+}
+
+var fileDescriptorMount = []byte{
+	// 200 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4b, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xe6, 0x97,
+	0xe6, 0x95, 0xe8, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x54, 0xe8, 0x81, 0x65, 0xa5,
+	0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x52, 0x2a, 0x17, 0xab,
+	0x2f, 0x48, 0x9b, 0x90, 0x10, 0x17, 0x0b, 0x48, 0x9d, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10,
+	0x98, 0x2d, 0x24, 0xc6, 0xc5, 0x56, 0x9c, 0x5f, 0x5a, 0x94, 0x9c, 0x2a, 0xc1, 0x04, 0x16, 0x85,
+	0xf2, 0x40, 0xe2, 0x25, 0x89, 0x45, 0xe9, 0xa9, 0x25, 0x12, 0xcc, 0x10, 0x71, 0x08, 0x4f, 0x48,
+	0x82, 0x8b, 0x3d, 0xbf, 0xa0, 0x24, 0x33, 0x3f, 0xaf, 0x58, 0x82, 0x45, 0x81, 0x59, 0x83, 0x33,
+	0x08, 0xc6, 0x75, 0xf2, 0x3a, 0xf1, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x86, 0x47, 0x72,
+	0x8c, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0x63, 0x94, 0x01,
+	0xf1, 0x1e, 0xb4, 0x06, 0x93, 0x49, 0x6c, 0x60, 0x97, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
+	0xe5, 0xc7, 0x07, 0x3f, 0x1b, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/mount.proto b/vendor/github.com/containerd/containerd/api/types/mount.proto
new file mode 100644
index 0000000..031e654
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/mount.proto
@@ -0,0 +1,29 @@
+syntax = "proto3";
+
+package containerd.types;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/containerd/containerd/api/types;types";
+
+// Mount describes mounts for a container.
+//
+// This type is the lingua franca of ContainerD. All services provide mounts
+// to be used with the container at creation time.
+//
+// The Mount type follows the structure of the mount syscall, including a type,
+// source, target and options.
+message Mount {
+	// Type defines the nature of the mount.
+	string type = 1;
+
+	// Source specifies the name of the mount. Depending on mount type, this
+	// may be a volume name or a host path, or even ignored.
+	string source = 2;
+
+	// Target path in container
+	string target = 3;
+
+	// Options specifies zero or more fstab style mount options.
+	repeated string options = 4;
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/platform.pb.go b/vendor/github.com/containerd/containerd/api/types/platform.pb.go
new file mode 100644
index 0000000..0ca2afc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/platform.pb.go
@@ -0,0 +1,412 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/types/platform.proto
+// DO NOT EDIT!
+
+package types
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Platform follows the structure of the OCI platform specification, from
+// descriptors.
+type Platform struct {
+	OS           string `protobuf:"bytes,1,opt,name=os,proto3" json:"os,omitempty"`
+	Architecture string `protobuf:"bytes,2,opt,name=architecture,proto3" json:"architecture,omitempty"`
+	Variant      string `protobuf:"bytes,3,opt,name=variant,proto3" json:"variant,omitempty"`
+}
+
+func (m *Platform) Reset()                    { *m = Platform{} }
+func (*Platform) ProtoMessage()               {}
+func (*Platform) Descriptor() ([]byte, []int) { return fileDescriptorPlatform, []int{0} }
+
+func init() {
+	proto.RegisterType((*Platform)(nil), "containerd.types.Platform")
+}
+func (m *Platform) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Platform) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.OS) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintPlatform(dAtA, i, uint64(len(m.OS)))
+		i += copy(dAtA[i:], m.OS)
+	}
+	if len(m.Architecture) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintPlatform(dAtA, i, uint64(len(m.Architecture)))
+		i += copy(dAtA[i:], m.Architecture)
+	}
+	if len(m.Variant) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintPlatform(dAtA, i, uint64(len(m.Variant)))
+		i += copy(dAtA[i:], m.Variant)
+	}
+	return i, nil
+}
+
+func encodeFixed64Platform(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Platform(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintPlatform(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Platform) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.OS)
+	if l > 0 {
+		n += 1 + l + sovPlatform(uint64(l))
+	}
+	l = len(m.Architecture)
+	if l > 0 {
+		n += 1 + l + sovPlatform(uint64(l))
+	}
+	l = len(m.Variant)
+	if l > 0 {
+		n += 1 + l + sovPlatform(uint64(l))
+	}
+	return n
+}
+
+func sovPlatform(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozPlatform(x uint64) (n int) {
+	return sovPlatform(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Platform) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Platform{`,
+		`OS:` + fmt.Sprintf("%v", this.OS) + `,`,
+		`Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`,
+		`Variant:` + fmt.Sprintf("%v", this.Variant) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringPlatform(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Platform) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowPlatform
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Platform: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowPlatform
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthPlatform
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.OS = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowPlatform
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthPlatform
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Architecture = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowPlatform
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthPlatform
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Variant = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipPlatform(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthPlatform
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipPlatform(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowPlatform
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowPlatform
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowPlatform
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthPlatform
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowPlatform
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipPlatform(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthPlatform = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowPlatform   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/types/platform.proto", fileDescriptorPlatform)
+}
+
+var fileDescriptorPlatform = []byte{
+	// 203 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x17, 0xe4, 0x24,
+	0x96, 0xa4, 0xe5, 0x17, 0xe5, 0xea, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0x14, 0xe9,
+	0x81, 0x15, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x25, 0xf5, 0x41, 0x2c, 0x88, 0x3a, 0xa5,
+	0x04, 0x2e, 0x8e, 0x00, 0xa8, 0x4e, 0x21, 0x31, 0x2e, 0xa6, 0xfc, 0x62, 0x09, 0x46, 0x05, 0x46,
+	0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0xfc, 0x83, 0x83, 0x98, 0xf2, 0x8b, 0x85, 0x94,
+	0xb8, 0x78, 0x12, 0x8b, 0x92, 0x33, 0x32, 0x4b, 0x52, 0x93, 0x4b, 0x4a, 0x8b, 0x52, 0x25, 0x98,
+	0x40, 0x2a, 0x82, 0x50, 0xc4, 0x84, 0x24, 0xb8, 0xd8, 0xcb, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x4a,
+	0x24, 0x98, 0xc1, 0xd2, 0x30, 0xae, 0x93, 0xd7, 0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31,
+	0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4,
+	0x18, 0xa3, 0x0c, 0x88, 0xf7, 0x9e, 0x35, 0x98, 0x4c, 0x62, 0x03, 0x3b, 0xda, 0x18, 0x10, 0x00,
+	0x00, 0xff, 0xff, 0x97, 0xa1, 0x99, 0x56, 0x19, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/platform.proto b/vendor/github.com/containerd/containerd/api/types/platform.proto
new file mode 100644
index 0000000..b1dce06
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/platform.proto
@@ -0,0 +1,15 @@
+syntax = "proto3";
+
+package containerd.types;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/containerd/containerd/api/types;types";
+
+// Platform follows the structure of the OCI platform specification, from
+// descriptors.
+message Platform {
+	string os = 1 [(gogoproto.customname) = "OS"];
+	string architecture = 2;
+	string variant = 3;
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.pb.go b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
new file mode 100644
index 0000000..ccc230a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/task/task.pb.go
@@ -0,0 +1,907 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/api/types/task/task.proto
+// DO NOT EDIT!
+
+/*
+	Package task is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/api/types/task/task.proto
+
+	It has these top-level messages:
+		Process
+		ProcessInfo
+*/
+package task
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/gogo/protobuf/types"
+import google_protobuf2 "github.com/gogo/protobuf/types"
+
+import time "time"
+
+import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type Status int32
+
+const (
+	StatusUnknown Status = 0
+	StatusCreated Status = 1
+	StatusRunning Status = 2
+	StatusStopped Status = 3
+	StatusPaused  Status = 4
+	StatusPausing Status = 5
+)
+
+var Status_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "CREATED",
+	2: "RUNNING",
+	3: "STOPPED",
+	4: "PAUSED",
+	5: "PAUSING",
+}
+var Status_value = map[string]int32{
+	"UNKNOWN": 0,
+	"CREATED": 1,
+	"RUNNING": 2,
+	"STOPPED": 3,
+	"PAUSED":  4,
+	"PAUSING": 5,
+}
+
+func (x Status) String() string {
+	return proto.EnumName(Status_name, int32(x))
+}
+func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
+
+type Process struct {
+	ContainerID string    `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
+	ID          string    `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
+	Pid         uint32    `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
+	Status      Status    `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
+	Stdin       string    `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout      string    `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr      string    `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Terminal    bool      `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	ExitStatus  uint32    `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt    time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+}
+
+func (m *Process) Reset()                    { *m = Process{} }
+func (*Process) ProtoMessage()               {}
+func (*Process) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
+
+type ProcessInfo struct {
+	// PID is the process ID.
+	Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
+	// Info contains additional process information.
+	//
+	// Info varies by platform.
+	Info *google_protobuf2.Any `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"`
+}
+
+func (m *ProcessInfo) Reset()                    { *m = ProcessInfo{} }
+func (*ProcessInfo) ProtoMessage()               {}
+func (*ProcessInfo) Descriptor() ([]byte, []int) { return fileDescriptorTask, []int{1} }
+
+func init() {
+	proto.RegisterType((*Process)(nil), "containerd.v1.types.Process")
+	proto.RegisterType((*ProcessInfo)(nil), "containerd.v1.types.ProcessInfo")
+	proto.RegisterEnum("containerd.v1.types.Status", Status_name, Status_value)
+}
+func (m *Process) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Process) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ContainerID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ContainerID)))
+		i += copy(dAtA[i:], m.ContainerID)
+	}
+	if len(m.ID) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if m.Pid != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+	}
+	if m.Status != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.Status))
+	}
+	if len(m.Stdin) > 0 {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.Stdin)))
+		i += copy(dAtA[i:], m.Stdin)
+	}
+	if len(m.Stdout) > 0 {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.Stdout)))
+		i += copy(dAtA[i:], m.Stdout)
+	}
+	if len(m.Stderr) > 0 {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(len(m.Stderr)))
+		i += copy(dAtA[i:], m.Stderr)
+	}
+	if m.Terminal {
+		dAtA[i] = 0x40
+		i++
+		if m.Terminal {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.ExitStatus != 0 {
+		dAtA[i] = 0x48
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus))
+	}
+	dAtA[i] = 0x52
+	i++
+	i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n1
+	return i, nil
+}
+
+func (m *ProcessInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ProcessInfo) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Pid != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.Pid))
+	}
+	if m.Info != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintTask(dAtA, i, uint64(m.Info.Size()))
+		n2, err := m.Info.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n2
+	}
+	return i, nil
+}
+
+func encodeFixed64Task(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Task(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintTask(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Process) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ContainerID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	if m.Pid != 0 {
+		n += 1 + sovTask(uint64(m.Pid))
+	}
+	if m.Status != 0 {
+		n += 1 + sovTask(uint64(m.Status))
+	}
+	l = len(m.Stdin)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	l = len(m.Stdout)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	l = len(m.Stderr)
+	if l > 0 {
+		n += 1 + l + sovTask(uint64(l))
+	}
+	if m.Terminal {
+		n += 2
+	}
+	if m.ExitStatus != 0 {
+		n += 1 + sovTask(uint64(m.ExitStatus))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
+	n += 1 + l + sovTask(uint64(l))
+	return n
+}
+
+func (m *ProcessInfo) Size() (n int) {
+	var l int
+	_ = l
+	if m.Pid != 0 {
+		n += 1 + sovTask(uint64(m.Pid))
+	}
+	if m.Info != nil {
+		l = m.Info.Size()
+		n += 1 + l + sovTask(uint64(l))
+	}
+	return n
+}
+
+func sovTask(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozTask(x uint64) (n int) {
+	return sovTask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Process) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Process{`,
+		`ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
+		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
+		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
+		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ProcessInfo) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ProcessInfo{`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`Info:` + strings.Replace(fmt.Sprintf("%v", this.Info), "Any", "google_protobuf2.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringTask(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Process) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Process: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Process: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ContainerID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			m.Status = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Status |= (Status(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdin = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdout = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stderr = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Terminal = bool(v != 0)
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType)
+			}
+			m.ExitStatus = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ProcessInfo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ProcessInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ProcessInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthTask
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Info == nil {
+				m.Info = &google_protobuf2.Any{}
+			}
+			if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipTask(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthTask
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipTask(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowTask
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowTask
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthTask
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowTask
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipTask(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthTask = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowTask   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/api/types/task/task.proto", fileDescriptorTask)
+}
+
+var fileDescriptorTask = []byte{
+	// 543 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xcf, 0x6e, 0xd3, 0x4c,
+	0x14, 0xc5, 0x33, 0x6e, 0xe3, 0x24, 0xe3, 0xb6, 0x9f, 0x3f, 0x13, 0x55, 0xc6, 0x20, 0xdb, 0xea,
+	0xca, 0x62, 0x61, 0x8b, 0x74, 0xc7, 0x2e, 0xff, 0x84, 0x2c, 0x24, 0x37, 0x72, 0x12, 0xb1, 0x8c,
+	0x9c, 0x78, 0x62, 0x46, 0x6d, 0x66, 0x2c, 0x7b, 0x0c, 0x64, 0xc7, 0x12, 0x75, 0xc5, 0x0b, 0x74,
+	0x05, 0x4f, 0xc1, 0x13, 0x64, 0xc9, 0x0a, 0xb1, 0x0a, 0xd4, 0x4f, 0x82, 0xc6, 0x76, 0xd2, 0x08,
+	0xd8, 0x8c, 0xee, 0x3d, 0xbf, 0x33, 0x77, 0xee, 0x1c, 0xf8, 0x22, 0xc2, 0xec, 0x4d, 0x36, 0xb7,
+	0x17, 0x74, 0xe5, 0x2c, 0x28, 0x61, 0x01, 0x26, 0x28, 0x09, 0x0f, 0xcb, 0x20, 0xc6, 0x0e, 0x5b,
+	0xc7, 0x28, 0x75, 0x58, 0x90, 0x5e, 0x17, 0x87, 0x1d, 0x27, 0x94, 0x51, 0xe5, 0xd1, 0x83, 0xcb,
+	0x7e, 0xfb, 0xdc, 0x2e, 0x4c, 0x5a, 0x3b, 0xa2, 0x11, 0x2d, 0xb8, 0xc3, 0xab, 0xd2, 0xaa, 0x19,
+	0x11, 0xa5, 0xd1, 0x0d, 0x72, 0x8a, 0x6e, 0x9e, 0x2d, 0x1d, 0x86, 0x57, 0x28, 0x65, 0xc1, 0x2a,
+	0xae, 0x0c, 0x8f, 0xff, 0x34, 0x04, 0x64, 0x5d, 0xa2, 0x8b, 0x5c, 0x80, 0x8d, 0x51, 0x42, 0x17,
+	0x28, 0x4d, 0x95, 0x0e, 0x3c, 0xd9, 0x3f, 0x3a, 0xc3, 0xa1, 0x0a, 0x4c, 0x60, 0xb5, 0x7a, 0xff,
+	0xe5, 0x5b, 0x43, 0xea, 0xef, 0x74, 0x77, 0xe0, 0x4b, 0x7b, 0x93, 0x1b, 0x2a, 0xe7, 0x50, 0xc0,
+	0xa1, 0x2a, 0x14, 0x4e, 0x31, 0xdf, 0x1a, 0x82, 0x3b, 0xf0, 0x05, 0x1c, 0x2a, 0x32, 0x3c, 0x8a,
+	0x71, 0xa8, 0x1e, 0x99, 0xc0, 0x3a, 0xf5, 0x79, 0xa9, 0x5c, 0x42, 0x31, 0x65, 0x01, 0xcb, 0x52,
+	0xf5, 0xd8, 0x04, 0xd6, 0x59, 0xe7, 0x89, 0xfd, 0x8f, 0x1f, 0xda, 0xe3, 0xc2, 0xe2, 0x57, 0x56,
+	0xa5, 0x0d, 0xeb, 0x29, 0x0b, 0x31, 0x51, 0xeb, 0xfc, 0x05, 0xbf, 0x6c, 0x94, 0x73, 0x3e, 0x2a,
+	0xa4, 0x19, 0x53, 0xc5, 0x42, 0xae, 0xba, 0x4a, 0x47, 0x49, 0xa2, 0x36, 0xf6, 0x3a, 0x4a, 0x12,
+	0x45, 0x83, 0x4d, 0x86, 0x92, 0x15, 0x26, 0xc1, 0x8d, 0xda, 0x34, 0x81, 0xd5, 0xf4, 0xf7, 0xbd,
+	0x62, 0x40, 0x09, 0xbd, 0xc7, 0x6c, 0x56, 0xed, 0xd6, 0x2a, 0x16, 0x86, 0x5c, 0x2a, 0x57, 0x51,
+	0xba, 0xb0, 0xc5, 0x3b, 0x14, 0xce, 0x02, 0xa6, 0x42, 0x13, 0x58, 0x52, 0x47, 0xb3, 0xcb, 0x40,
+	0xed, 0x5d, 0xa0, 0xf6, 0x64, 0x97, 0x78, 0xaf, 0xb9, 0xd9, 0x1a, 0xb5, 0x4f, 0x3f, 0x0d, 0xe0,
+	0x37, 0xcb, 0x6b, 0x5d, 0x76, 0xe1, 0x42, 0xa9, 0xca, 0xd8, 0x25, 0x4b, 0xba, 0xcb, 0x06, 0x3c,
+	0x64, 0x63, 0xc1, 0x63, 0x4c, 0x96, 0xb4, 0xc8, 0x51, 0xea, 0xb4, 0xff, 0x1a, 0xdf, 0x25, 0x6b,
+	0xbf, 0x70, 0x3c, 0xfb, 0x0e, 0xa0, 0x58, 0x2d, 0xa6, 0xc3, 0xc6, 0xd4, 0x7b, 0xe5, 0x5d, 0xbd,
+	0xf6, 0xe4, 0x9a, 0xf6, 0xff, 0xed, 0x9d, 0x79, 0x5a, 0x82, 0x29, 0xb9, 0x26, 0xf4, 0x1d, 0xe1,
+	0xbc, 0xef, 0x0f, 0xbb, 0x93, 0xe1, 0x40, 0x06, 0x87, 0xbc, 0x9f, 0xa0, 0x80, 0xa1, 0x90, 0x73,
+	0x7f, 0xea, 0x79, 0xae, 0xf7, 0x52, 0x16, 0x0e, 0xb9, 0x9f, 0x11, 0x82, 0x49, 0xc4, 0xf9, 0x78,
+	0x72, 0x35, 0x1a, 0x0d, 0x07, 0xf2, 0xd1, 0x21, 0x1f, 0x33, 0x1a, 0xc7, 0x28, 0x54, 0x9e, 0x42,
+	0x71, 0xd4, 0x9d, 0x8e, 0x87, 0x03, 0xf9, 0x58, 0x93, 0x6f, 0xef, 0xcc, 0x93, 0x12, 0x8f, 0x82,
+	0x2c, 0x2d, 0xa7, 0x73, 0xca, 0xa7, 0xd7, 0x0f, 0x6f, 0x73, 0x8c, 0x49, 0xa4, 0x9d, 0x7d, 0xfc,
+	0xac, 0xd7, 0xbe, 0x7e, 0xd1, 0xab, 0xdf, 0xf4, 0xd4, 0xcd, 0xbd, 0x5e, 0xfb, 0x71, 0xaf, 0xd7,
+	0x3e, 0xe4, 0x3a, 0xd8, 0xe4, 0x3a, 0xf8, 0x96, 0xeb, 0xe0, 0x57, 0xae, 0x83, 0xb9, 0x58, 0xc4,
+	0x70, 0xf9, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x19, 0xf7, 0x5b, 0x8f, 0x4e, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/api/types/task/task.proto b/vendor/github.com/containerd/containerd/api/types/task/task.proto
new file mode 100644
index 0000000..5845edf
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/api/types/task/task.proto
@@ -0,0 +1,41 @@
+syntax = "proto3";
+
+package containerd.v1.types;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/any.proto";
+
+enum Status {
+	option (gogoproto.goproto_enum_prefix) = false;
+	option (gogoproto.enum_customname) = "Status";
+
+	UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "StatusUnknown"];
+	CREATED = 1 [(gogoproto.enumvalue_customname) = "StatusCreated"];
+	RUNNING = 2 [(gogoproto.enumvalue_customname) = "StatusRunning"];
+	STOPPED = 3 [(gogoproto.enumvalue_customname) = "StatusStopped"];
+	PAUSED = 4 [(gogoproto.enumvalue_customname) = "StatusPaused"];
+	PAUSING = 5 [(gogoproto.enumvalue_customname) = "StatusPausing"];
+}
+
+message Process {
+	string container_id = 1;
+	string id = 2;
+	uint32 pid = 3;
+	Status status = 4;
+	string stdin = 5;
+	string stdout = 6;
+	string stderr = 7;
+	bool terminal = 8;
+	uint32 exit_status = 9;
+	google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message ProcessInfo {
+	// PID is the process ID.
+	uint32 pid = 1;
+	// Info contains additional process information.
+	//
+	// Info varies by platform.
+	google.protobuf.Any info = 2;
+}
diff --git a/vendor/github.com/containerd/containerd/archive/path.go b/vendor/github.com/containerd/containerd/archive/path.go
new file mode 100644
index 0000000..0f6cfa3
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/archive/path.go
@@ -0,0 +1 @@
+package archive
diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go
new file mode 100644
index 0000000..0550f21
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/archive/tar.go
@@ -0,0 +1,567 @@
+package archive
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/containerd/containerd/fs"
+	"github.com/containerd/containerd/log"
+	"github.com/dmcgowan/go-tar"
+	"github.com/pkg/errors"
+)
+
+var (
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return make([]byte, 32*1024)
+		},
+	}
+)
+
+// Diff returns a tar stream of the computed filesystem
+// difference between the provided directories.
+//
+// Produces a tar using OCI style file markers for deletions. Deleted
+// files will be prepended with the prefix ".wh.". This style is
+// based off AUFS whiteouts.
+// See https://github.com/opencontainers/image-spec/blob/master/layer.md
+func Diff(ctx context.Context, a, b string) io.ReadCloser {
+	r, w := io.Pipe()
+
+	go func() {
+		err := WriteDiff(ctx, w, a, b)
+		if err = w.CloseWithError(err); err != nil {
+			log.G(ctx).WithError(err).Debugf("closing tar pipe failed")
+		}
+	}()
+
+	return r
+}
+
+// WriteDiff writes a tar stream of the computed difference between the
+// provided directories.
+//
+// Produces a tar using OCI style file markers for deletions. Deleted
+// files will be prepended with the prefix ".wh.". This style is
+// based off AUFS whiteouts.
+// See https://github.com/opencontainers/image-spec/blob/master/layer.md
+func WriteDiff(ctx context.Context, w io.Writer, a, b string) error {
+	cw := newChangeWriter(w, b)
+	err := fs.Changes(ctx, a, b, cw.HandleChange)
+	if err != nil {
+		return errors.Wrap(err, "failed to create diff tar stream")
+	}
+	return cw.Close()
+}
+
+const (
+	// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
+	// filename this means that file has been removed from the base layer.
+	// See https://github.com/opencontainers/image-spec/blob/master/layer.md#whiteouts
+	whiteoutPrefix = ".wh."
+
+	// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
+	// for removing an actual file. Normally these files are excluded from exported
+	// archives.
+	whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
+
+	// whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
+	// layers. Normally these should not go into exported archives and all changed
+	// hardlinks should be copied to the top layer.
+	whiteoutLinkDir = whiteoutMetaPrefix + "plnk"
+
+	// whiteoutOpaqueDir file means directory has been made opaque - meaning
+	// readdir calls to this directory do not follow to lower layers.
+	whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
+)
+
+// Apply applies a tar stream of an OCI style diff tar.
+// See https://github.com/opencontainers/image-spec/blob/master/layer.md#applying-changesets
+func Apply(ctx context.Context, root string, r io.Reader) (int64, error) {
+	root = filepath.Clean(root)
+
+	var (
+		tr   = tar.NewReader(r)
+		size int64
+		dirs []*tar.Header
+
+		// Used for handling opaque directory markers which
+		// may occur out of order
+		unpackedPaths = make(map[string]struct{})
+
+		// Used for aufs plink directory
+		aufsTempdir   = ""
+		aufsHardlinks = make(map[string]*tar.Header)
+	)
+
+	// Iterate through the files in the archive.
+	for {
+		select {
+		case <-ctx.Done():
+			return 0, ctx.Err()
+		default:
+		}
+
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			// end of tar archive
+			break
+		}
+		if err != nil {
+			return 0, err
+		}
+
+		size += hdr.Size
+
+		// Normalize name, for safety and for a simple is-root check
+		hdr.Name = filepath.Clean(hdr.Name)
+
+		if skipFile(hdr) {
+			log.G(ctx).Warnf("file %q ignored: archive may not be supported on system", hdr.Name)
+			continue
+		}
+
+		// Split name and resolve symlinks for root directory.
+		ppath, base := filepath.Split(hdr.Name)
+		ppath, err = fs.RootPath(root, ppath)
+		if err != nil {
+			return 0, errors.Wrap(err, "failed to get root path")
+		}
+
+		// Join to root before joining to parent path to ensure relative links are
+		// already resolved based on the root before adding to parent.
+		path := filepath.Join(ppath, filepath.Join("/", base))
+		if path == root {
+			log.G(ctx).Debugf("file %q ignored: resolved to root", hdr.Name)
+			continue
+		}
+
+		// If file is not directly under root, ensure parent directory
+		// exists or is created.
+		if ppath != root {
+			parentPath := ppath
+			if base == "" {
+				parentPath = filepath.Dir(path)
+			}
+			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
+				err = mkdirAll(parentPath, 0700)
+				if err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		// Skip AUFS metadata dirs
+		if strings.HasPrefix(hdr.Name, whiteoutMetaPrefix) {
+			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
+			// We don't want this directory, but we need the files in them so that
+			// such hardlinks can be resolved.
+			if strings.HasPrefix(hdr.Name, whiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
+				basename := filepath.Base(hdr.Name)
+				aufsHardlinks[basename] = hdr
+				if aufsTempdir == "" {
+					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
+						return 0, err
+					}
+					defer os.RemoveAll(aufsTempdir)
+				}
+				p, err := fs.RootPath(aufsTempdir, basename)
+				if err != nil {
+					return 0, err
+				}
+				if err := createTarFile(ctx, p, root, hdr, tr); err != nil {
+					return 0, err
+				}
+			}
+
+			if hdr.Name != whiteoutOpaqueDir {
+				continue
+			}
+		}
+
+		if strings.HasPrefix(base, whiteoutPrefix) {
+			dir := filepath.Dir(path)
+			if base == whiteoutOpaqueDir {
+				_, err := os.Lstat(dir)
+				if err != nil {
+					return 0, err
+				}
+				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
+					if err != nil {
+						if os.IsNotExist(err) {
+							err = nil // parent was deleted
+						}
+						return err
+					}
+					if path == dir {
+						return nil
+					}
+					if _, exists := unpackedPaths[path]; !exists {
+						err := os.RemoveAll(path)
+						return err
+					}
+					return nil
+				})
+				if err != nil {
+					return 0, err
+				}
+				continue
+			}
+
+			originalBase := base[len(whiteoutPrefix):]
+			originalPath := filepath.Join(dir, originalBase)
+			if err := os.RemoveAll(originalPath); err != nil {
+				return 0, err
+			}
+			continue
+		}
+		// If path exists we almost always just want to remove and replace it.
+		// The only exception is when it is a directory *and* the file from
+		// the layer is also a directory. Then we want to merge them (i.e.
+		// just apply the metadata from the layer).
+		if fi, err := os.Lstat(path); err == nil {
+			if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
+				if err := os.RemoveAll(path); err != nil {
+					return 0, err
+				}
+			}
+		}
+
+		srcData := io.Reader(tr)
+		srcHdr := hdr
+
+		// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
+		// we manually retarget these into the temporary files we extracted them into
+		if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), whiteoutLinkDir) {
+			linkBasename := filepath.Base(hdr.Linkname)
+			srcHdr = aufsHardlinks[linkBasename]
+			if srcHdr == nil {
+				return 0, fmt.Errorf("Invalid aufs hardlink")
+			}
+			p, err := fs.RootPath(aufsTempdir, linkBasename)
+			if err != nil {
+				return 0, err
+			}
+			tmpFile, err := os.Open(p)
+			if err != nil {
+				return 0, err
+			}
+			defer tmpFile.Close()
+			srcData = tmpFile
+		}
+
+		if err := createTarFile(ctx, path, root, srcHdr, srcData); err != nil {
+			return 0, err
+		}
+
+		// Directory mtimes must be handled at the end to avoid further
+		// file creation in them to modify the directory mtime
+		if hdr.Typeflag == tar.TypeDir {
+			dirs = append(dirs, hdr)
+		}
+		unpackedPaths[path] = struct{}{}
+	}
+
+	for _, hdr := range dirs {
+		path, err := fs.RootPath(root, hdr.Name)
+		if err != nil {
+			return 0, err
+		}
+		if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil {
+			return 0, err
+		}
+	}
+
+	return size, nil
+}
+
+type changeWriter struct {
+	tw        *tar.Writer
+	source    string
+	whiteoutT time.Time
+	inodeSrc  map[uint64]string
+	inodeRefs map[uint64][]string
+}
+
+func newChangeWriter(w io.Writer, source string) *changeWriter {
+	return &changeWriter{
+		tw:        tar.NewWriter(w),
+		source:    source,
+		whiteoutT: time.Now(),
+		inodeSrc:  map[uint64]string{},
+		inodeRefs: map[uint64][]string{},
+	}
+}
+
+func (cw *changeWriter) HandleChange(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
+	if err != nil {
+		return err
+	}
+	if k == fs.ChangeKindDelete {
+		whiteOutDir := filepath.Dir(p)
+		whiteOutBase := filepath.Base(p)
+		whiteOut := filepath.Join(whiteOutDir, whiteoutPrefix+whiteOutBase)
+		hdr := &tar.Header{
+			Name:       whiteOut[1:],
+			Size:       0,
+			ModTime:    cw.whiteoutT,
+			AccessTime: cw.whiteoutT,
+			ChangeTime: cw.whiteoutT,
+		}
+		if err := cw.tw.WriteHeader(hdr); err != nil {
+			return errors.Wrap(err, "failed to write whiteout header")
+		}
+	} else {
+		var (
+			link   string
+			err    error
+			source = filepath.Join(cw.source, p)
+		)
+
+		if f.Mode()&os.ModeSymlink != 0 {
+			if link, err = os.Readlink(source); err != nil {
+				return err
+			}
+		}
+
+		hdr, err := tar.FileInfoHeader(f, link)
+		if err != nil {
+			return err
+		}
+
+		hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
+
+		name := p
+		if strings.HasPrefix(name, string(filepath.Separator)) {
+			name, err = filepath.Rel(string(filepath.Separator), name)
+			if err != nil {
+				return errors.Wrap(err, "failed to make path relative")
+			}
+		}
+		name, err = tarName(name)
+		if err != nil {
+			return errors.Wrap(err, "cannot canonicalize path")
+		}
+		// suffix with '/' for directories
+		if f.IsDir() && !strings.HasSuffix(name, "/") {
+			name += "/"
+		}
+		hdr.Name = name
+
+		if err := setHeaderForSpecialDevice(hdr, name, f); err != nil {
+			return errors.Wrap(err, "failed to set device headers")
+		}
+
+		// additionalLinks stores file names which must be linked to
+		// this file when this file is added
+		var additionalLinks []string
+		inode, isHardlink := fs.GetLinkInfo(f)
+		if isHardlink {
+			// If the inode has a source, always link to it
+			if source, ok := cw.inodeSrc[inode]; ok {
+				hdr.Typeflag = tar.TypeLink
+				hdr.Linkname = source
+				hdr.Size = 0
+			} else {
+				if k == fs.ChangeKindUnmodified {
+					cw.inodeRefs[inode] = append(cw.inodeRefs[inode], name)
+					return nil
+				}
+				cw.inodeSrc[inode] = name
+				additionalLinks = cw.inodeRefs[inode]
+				delete(cw.inodeRefs, inode)
+			}
+		} else if k == fs.ChangeKindUnmodified {
+			// Nothing to write to diff
+			return nil
+		}
+
+		if capability, err := getxattr(source, "security.capability"); err != nil {
+			return errors.Wrap(err, "failed to get capabilities xattr")
+		} else if capability != nil {
+			hdr.Xattrs = map[string]string{
+				"security.capability": string(capability),
+			}
+		}
+
+		if err := cw.tw.WriteHeader(hdr); err != nil {
+			return errors.Wrap(err, "failed to write file header")
+		}
+
+		if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
+			file, err := open(source)
+			if err != nil {
+				return errors.Wrapf(err, "failed to open path: %v", source)
+			}
+			defer file.Close()
+
+			buf := bufferPool.Get().([]byte)
+			n, err := io.CopyBuffer(cw.tw, file, buf)
+			bufferPool.Put(buf)
+			if err != nil {
+				return errors.Wrap(err, "failed to copy")
+			}
+			if n != hdr.Size {
+				return errors.New("short write copying file")
+			}
+		}
+
+		if additionalLinks != nil {
+			source = hdr.Name
+			for _, extra := range additionalLinks {
+				hdr.Name = extra
+				hdr.Typeflag = tar.TypeLink
+				hdr.Linkname = source
+				hdr.Size = 0
+				if err := cw.tw.WriteHeader(hdr); err != nil {
+					return errors.Wrap(err, "failed to write file header")
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (cw *changeWriter) Close() error {
+	if err := cw.tw.Close(); err != nil {
+		return errors.Wrap(err, "failed to close tar writer")
+	}
+	return nil
+}
+
+func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header, reader io.Reader) error {
+	// hdr.Mode is in linux format, which we can use for syscalls,
+	// but for os.Foo() calls we need the mode converted to os.FileMode,
+	// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
+	hdrInfo := hdr.FileInfo()
+
+	switch hdr.Typeflag {
+	case tar.TypeDir:
+		// Create directory unless it exists as a directory already.
+		// In that case we just want to merge the two
+		if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) {
+			if err := mkdir(path, hdrInfo.Mode()); err != nil {
+				return err
+			}
+		}
+
+	case tar.TypeReg, tar.TypeRegA:
+		file, err := openFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, hdrInfo.Mode())
+		if err != nil {
+			return err
+		}
+
+		_, err = copyBuffered(ctx, file, reader)
+		if err1 := file.Close(); err == nil {
+			err = err1
+		}
+		if err != nil {
+			return err
+		}
+
+	case tar.TypeBlock, tar.TypeChar:
+		// Handle this in an OS-specific way
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+			return err
+		}
+
+	case tar.TypeFifo:
+		// Handle this in an OS-specific way
+		if err := handleTarTypeBlockCharFifo(hdr, path); err != nil {
+			return err
+		}
+
+	case tar.TypeLink:
+		targetPath, err := fs.RootPath(extractDir, hdr.Linkname)
+		if err != nil {
+			return err
+		}
+		if err := os.Link(targetPath, path); err != nil {
+			return err
+		}
+
+	case tar.TypeSymlink:
+		if err := os.Symlink(hdr.Linkname, path); err != nil {
+			return err
+		}
+
+	case tar.TypeXGlobalHeader:
+		log.G(ctx).Debug("PAX Global Extended Headers found and ignored")
+		return nil
+
+	default:
+		return errors.Errorf("unhandled tar header type %d\n", hdr.Typeflag)
+	}
+
+	// Lchown is not supported on Windows.
+	if runtime.GOOS != "windows" {
+		if err := os.Lchown(path, hdr.Uid, hdr.Gid); err != nil {
+			return err
+		}
+	}
+
+	for key, value := range hdr.Xattrs {
+		if err := setxattr(path, key, value); err != nil {
+			if errors.Cause(err) == syscall.ENOTSUP {
+				log.G(ctx).WithError(err).Warnf("ignored xattr %s in archive", key)
+				continue
+			}
+			return err
+		}
+	}
+
+	// There is no LChmod, so ignore mode for symlink. Also, this
+	// must happen after chown, as that can modify the file mode
+	if err := handleLChmod(hdr, path, hdrInfo); err != nil {
+		return err
+	}
+
+	return chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime))
+}
+
+func copyBuffered(ctx context.Context, dst io.Writer, src io.Reader) (written int64, err error) {
+	buf := bufferPool.Get().([]byte)
+	defer bufferPool.Put(buf)
+
+	for {
+		select {
+		case <-ctx.Done():
+			err = ctx.Err()
+			return
+		default:
+		}
+
+		nr, er := src.Read(buf)
+		if nr > 0 {
+			nw, ew := dst.Write(buf[0:nr])
+			if nw > 0 {
+				written += int64(nw)
+			}
+			if ew != nil {
+				err = ew
+				break
+			}
+			if nr != nw {
+				err = io.ErrShortWrite
+				break
+			}
+		}
+		if er != nil {
+			if er != io.EOF {
+				err = er
+			}
+			break
+		}
+	}
+	return written, err
+
+}
diff --git a/vendor/github.com/containerd/containerd/archive/tar_unix.go b/vendor/github.com/containerd/containerd/archive/tar_unix.go
new file mode 100644
index 0000000..44b1069
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/archive/tar_unix.go
@@ -0,0 +1,130 @@
+// +build !windows
+
+package archive
+
+import (
+	"os"
+	"sync"
+	"syscall"
+
+	"github.com/containerd/continuity/sysx"
+	"github.com/dmcgowan/go-tar"
+	"github.com/opencontainers/runc/libcontainer/system"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+func tarName(p string) (string, error) {
+	return p, nil
+}
+
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+	return perm
+}
+
+func setHeaderForSpecialDevice(hdr *tar.Header, name string, fi os.FileInfo) error {
+	s, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return errors.New("unsupported stat type")
+	}
+
+	// Currently go does not fill in the major/minors
+	if s.Mode&syscall.S_IFBLK != 0 ||
+		s.Mode&syscall.S_IFCHR != 0 {
+		hdr.Devmajor = int64(unix.Major(uint64(s.Rdev)))
+		hdr.Devminor = int64(unix.Minor(uint64(s.Rdev)))
+	}
+
+	return nil
+}
+
+func open(p string) (*os.File, error) {
+	return os.Open(p)
+}
+
+func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
+	f, err := os.OpenFile(name, flag, perm)
+	if err != nil {
+		return nil, err
+	}
+	// Call chmod to avoid permission mask
+	if err := os.Chmod(name, perm); err != nil {
+		return nil, err
+	}
+	return f, err
+}
+
+func mkdirAll(path string, perm os.FileMode) error {
+	return os.MkdirAll(path, perm)
+}
+
+func mkdir(path string, perm os.FileMode) error {
+	if err := os.Mkdir(path, perm); err != nil {
+		return err
+	}
+	// Only final created directory gets explicit permission
+	// call to avoid permission mask
+	return os.Chmod(path, perm)
+}
+
+func skipFile(*tar.Header) bool {
+	return false
+}
+
+var (
+	inUserNS bool
+	nsOnce   sync.Once
+)
+
+func setInUserNS() {
+	inUserNS = system.RunningInUserNS()
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	nsOnce.Do(setInUserNS)
+	if inUserNS {
+		// cannot create a device if running in user namespace
+		return nil
+	}
+
+	mode := uint32(hdr.Mode & 07777)
+	switch hdr.Typeflag {
+	case tar.TypeBlock:
+		mode |= unix.S_IFBLK
+	case tar.TypeChar:
+		mode |= unix.S_IFCHR
+	case tar.TypeFifo:
+		mode |= unix.S_IFIFO
+	}
+
+	return unix.Mknod(path, mode, int(unix.Mkdev(uint32(hdr.Devmajor), uint32(hdr.Devminor))))
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+	if hdr.Typeflag == tar.TypeLink {
+		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
+			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+				return err
+			}
+		}
+	} else if hdr.Typeflag != tar.TypeSymlink {
+		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func getxattr(path, attr string) ([]byte, error) {
+	b, err := sysx.LGetxattr(path, attr)
+	if err == unix.ENOTSUP || err == sysx.ENODATA {
+		return nil, nil
+	}
+	return b, err
+}
+
+func setxattr(path, key, value string) error {
+	return sysx.LSetxattr(path, key, []byte(value), 0)
+}
diff --git a/vendor/github.com/containerd/containerd/archive/tar_windows.go b/vendor/github.com/containerd/containerd/archive/tar_windows.go
new file mode 100644
index 0000000..cb3b6c5
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/archive/tar_windows.go
@@ -0,0 +1,103 @@
+package archive
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/containerd/containerd/sys"
+	"github.com/dmcgowan/go-tar"
+)
+
+// tarName returns platform-specific filepath
+// to canonical posix-style path for tar archival. p is relative
+// path.
+func tarName(p string) (string, error) {
+	// windows: convert windows style relative path with backslashes
+	// into forward slashes. Since windows does not allow '/' or '\'
+	// in file names, it is mostly safe to replace however we must
+	// check just in case
+	if strings.Contains(p, "/") {
+		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
+	}
+
+	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
+}
+
+// chmodTarEntry is used to adjust the file permissions used in tar header based
+// on the platform the archival is done.
+func chmodTarEntry(perm os.FileMode) os.FileMode {
+	perm &= 0755
+	// Add the x bit: make everything +x from windows
+	perm |= 0111
+
+	return perm
+}
+
+func setHeaderForSpecialDevice(*tar.Header, string, os.FileInfo) error {
+	// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
+	return nil
+}
+
+func open(p string) (*os.File, error) {
+	// We use sys.OpenSequential to ensure we use sequential file
+	// access on Windows to avoid depleting the standby list.
+	return sys.OpenSequential(p)
+}
+
+func openFile(name string, flag int, perm os.FileMode) (*os.File, error) {
+	// Source is regular file. We use sys.OpenFileSequential to use sequential
+	// file access to avoid depleting the standby list on Windows.
+	return sys.OpenFileSequential(name, flag, perm)
+}
+
+func mkdirAll(path string, perm os.FileMode) error {
+	return sys.MkdirAll(path, perm)
+}
+
+func mkdir(path string, perm os.FileMode) error {
+	return os.Mkdir(path, perm)
+}
+
+func skipFile(hdr *tar.Header) bool {
+	// Windows does not support filenames with colons in them. Ignore
+	// these files. This is not a problem though (although it might
+	// appear that it is). Let's suppose a client is running docker pull.
+	// The daemon it points to is Windows. Would it make sense for the
+	// client to be doing a docker pull Ubuntu for example (which has files
+	// with colons in the name under /usr/share/man/man3)? No, absolutely
+	// not as it would really only make sense that they were pulling a
+	// Windows image. However, for development, it is necessary to be able
+	// to pull Linux images which are in the repository.
+	//
+	// TODO Windows. Once the registry is aware of what images are Windows-
+	// specific or Linux-specific, this warning should be changed to an error
+	// to cater for the situation where someone does manage to upload a Linux
+	// image but have it tagged as Windows inadvertently.
+	if strings.Contains(hdr.Name, ":") {
+		return true
+	}
+
+	return false
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
+	return nil
+}
+
+func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
+	return nil
+}
+
+func getxattr(path, attr string) ([]byte, error) {
+	return nil, nil
+}
+
+func setxattr(path, key, value string) error {
+	// Return not support error, do not wrap underlying not supported
+	// since xattrs should not exist in windows diff archives
+	return errors.New("xattrs not supported on Windows")
+}
diff --git a/vendor/github.com/containerd/containerd/archive/time.go b/vendor/github.com/containerd/containerd/archive/time.go
new file mode 100644
index 0000000..4e9ae95
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/archive/time.go
@@ -0,0 +1,38 @@
+package archive
+
+import (
+	"syscall"
+	"time"
+	"unsafe"
+)
+
+var (
+	minTime = time.Unix(0, 0)
+	maxTime time.Time
+)
+
+func init() {
+	if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
+		// This is a 64 bit timespec
+		// os.Chtimes limits time to the following
+		maxTime = time.Unix(0, 1<<63-1)
+	} else {
+		// This is a 32 bit timespec
+		maxTime = time.Unix(1<<31-1, 0)
+	}
+}
+
+func boundTime(t time.Time) time.Time {
+	if t.Before(minTime) || t.After(maxTime) {
+		return minTime
+	}
+
+	return t
+}
+
+func latestTime(t1, t2 time.Time) time.Time {
+	if t1.Before(t2) {
+		return t2
+	}
+	return t1
+}
diff --git a/vendor/github.com/containerd/containerd/archive/time_darwin.go b/vendor/github.com/containerd/containerd/archive/time_darwin.go
new file mode 100644
index 0000000..2ac517a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/archive/time_darwin.go
@@ -0,0 +1,14 @@
+package archive
+
+import (
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// as of macOS 10.12 there is apparently no way to set timestamps
+// with nanosecond precision. We could fall back to utimes/lutimes
+// and lose the precision as a temporary workaround.
+func chtimes(path string, atime, mtime time.Time) error {
+	return errors.New("OSX missing UtimesNanoAt")
+}
diff --git a/vendor/github.com/containerd/containerd/archive/time_unix.go b/vendor/github.com/containerd/containerd/archive/time_unix.go
new file mode 100644
index 0000000..054f06e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/archive/time_unix.go
@@ -0,0 +1,23 @@
+// +build linux freebsd solaris
+
+package archive
+
+import (
+	"time"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/pkg/errors"
+)
+
+func chtimes(path string, atime, mtime time.Time) error {
+	var utimes [2]unix.Timespec
+	utimes[0] = unix.NsecToTimespec(atime.UnixNano())
+	utimes[1] = unix.NsecToTimespec(mtime.UnixNano())
+
+	if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, utimes[0:], unix.AT_SYMLINK_NOFOLLOW); err != nil {
+		return errors.Wrap(err, "failed call to UtimesNanoAt")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/archive/time_windows.go b/vendor/github.com/containerd/containerd/archive/time_windows.go
new file mode 100644
index 0000000..0b18c34
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/archive/time_windows.go
@@ -0,0 +1,26 @@
+package archive
+
+import (
+	"time"
+
+	"golang.org/x/sys/windows"
+)
+
+// chtimes will set the create time on a file using the given modtime.
+// This requires calling SetFileTime and explicitly including the create time.
+func chtimes(path string, atime, mtime time.Time) error {
+	ctimespec := windows.NsecToTimespec(mtime.UnixNano())
+	pathp, e := windows.UTF16PtrFromString(path)
+	if e != nil {
+		return e
+	}
+	h, e := windows.CreateFile(pathp,
+		windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil,
+		windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0)
+	if e != nil {
+		return e
+	}
+	defer windows.Close(h)
+	c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec))
+	return windows.SetFileTime(h, &c, nil, nil)
+}
diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go
new file mode 100644
index 0000000..6f0eb28
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/client.go
@@ -0,0 +1,637 @@
+package containerd
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"runtime"
+	"strconv"
+	"sync"
+	"time"
+
+	containersapi "github.com/containerd/containerd/api/services/containers/v1"
+	contentapi "github.com/containerd/containerd/api/services/content/v1"
+	diffapi "github.com/containerd/containerd/api/services/diff/v1"
+	eventsapi "github.com/containerd/containerd/api/services/events/v1"
+	imagesapi "github.com/containerd/containerd/api/services/images/v1"
+	introspectionapi "github.com/containerd/containerd/api/services/introspection/v1"
+	namespacesapi "github.com/containerd/containerd/api/services/namespaces/v1"
+	snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
+	"github.com/containerd/containerd/api/services/tasks/v1"
+	versionservice "github.com/containerd/containerd/api/services/version/v1"
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/diff"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/containerd/plugin"
+	"github.com/containerd/containerd/reference"
+	"github.com/containerd/containerd/remotes"
+	"github.com/containerd/containerd/remotes/docker"
+	"github.com/containerd/containerd/remotes/docker/schema1"
+	contentservice "github.com/containerd/containerd/services/content"
+	diffservice "github.com/containerd/containerd/services/diff"
+	imagesservice "github.com/containerd/containerd/services/images"
+	snapshotservice "github.com/containerd/containerd/services/snapshot"
+	"github.com/containerd/containerd/snapshot"
+	"github.com/containerd/typeurl"
+	pempty "github.com/golang/protobuf/ptypes/empty"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/health/grpc_health_v1"
+)
+
+func init() {
+	const prefix = "types.containerd.io"
+	// register TypeUrls for commonly marshaled external types
+	major := strconv.Itoa(specs.VersionMajor)
+	typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
+	typeurl.Register(&specs.Process{}, prefix, "opencontainers/runtime-spec", major, "Process")
+	typeurl.Register(&specs.LinuxResources{}, prefix, "opencontainers/runtime-spec", major, "LinuxResources")
+	typeurl.Register(&specs.WindowsResources{}, prefix, "opencontainers/runtime-spec", major, "WindowsResources")
+}
+
+// New returns a new containerd client that is connected to the containerd
+// instance provided by address
+func New(address string, opts ...ClientOpt) (*Client, error) {
+	var copts clientOpts
+	for _, o := range opts {
+		if err := o(&copts); err != nil {
+			return nil, err
+		}
+	}
+	gopts := []grpc.DialOption{
+		grpc.WithBlock(),
+		grpc.WithInsecure(),
+		grpc.WithTimeout(60 * time.Second),
+		grpc.FailOnNonTempDialError(true),
+		grpc.WithBackoffMaxDelay(3 * time.Second),
+		grpc.WithDialer(Dialer),
+	}
+	if len(copts.dialOptions) > 0 {
+		gopts = copts.dialOptions
+	}
+	if copts.defaultns != "" {
+		unary, stream := newNSInterceptors(copts.defaultns)
+		gopts = append(gopts,
+			grpc.WithUnaryInterceptor(unary),
+			grpc.WithStreamInterceptor(stream),
+		)
+	}
+	conn, err := grpc.Dial(DialAddress(address), gopts...)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to dial %q", address)
+	}
+	return NewWithConn(conn, opts...)
+}
+
+// NewWithConn returns a new containerd client that is connected to the containerd
+// instance provided by the connection
+func NewWithConn(conn *grpc.ClientConn, opts ...ClientOpt) (*Client, error) {
+	return &Client{
+		conn:    conn,
+		runtime: fmt.Sprintf("%s.%s", plugin.RuntimePlugin, runtime.GOOS),
+	}, nil
+}
+
+// Client is the client to interact with containerd and its various services
+// using a uniform interface
+type Client struct {
+	conn    *grpc.ClientConn
+	runtime string
+}
+
+// IsServing returns true if the client can successfully connect to the
+// containerd daemon and the healthcheck service returns the SERVING
+// response.
+// This call will block if a transient error is encountered during
+// connection. A timeout can be set in the context to ensure it returns
+// early.
+func (c *Client) IsServing(ctx context.Context) (bool, error) {
+	r, err := c.HealthService().Check(ctx, &grpc_health_v1.HealthCheckRequest{}, grpc.FailFast(false))
+	if err != nil {
+		return false, err
+	}
+	return r.Status == grpc_health_v1.HealthCheckResponse_SERVING, nil
+}
+
+// Containers returns all containers created in containerd
+func (c *Client) Containers(ctx context.Context, filters ...string) ([]Container, error) {
+	r, err := c.ContainerService().List(ctx, filters...)
+	if err != nil {
+		return nil, err
+	}
+	var out []Container
+	for _, container := range r {
+		out = append(out, containerFromRecord(c, container))
+	}
+	return out, nil
+}
+
+// NewContainer will create a new container in container with the provided id
+// the id must be unique within the namespace
+func (c *Client) NewContainer(ctx context.Context, id string, opts ...NewContainerOpts) (Container, error) {
+	container := containers.Container{
+		ID: id,
+		Runtime: containers.RuntimeInfo{
+			Name: c.runtime,
+		},
+	}
+	for _, o := range opts {
+		if err := o(ctx, c, &container); err != nil {
+			return nil, err
+		}
+	}
+	r, err := c.ContainerService().Create(ctx, container)
+	if err != nil {
+		return nil, err
+	}
+	return containerFromRecord(c, r), nil
+}
+
+// LoadContainer loads an existing container from metadata
+func (c *Client) LoadContainer(ctx context.Context, id string) (Container, error) {
+	r, err := c.ContainerService().Get(ctx, id)
+	if err != nil {
+		return nil, err
+	}
+	return containerFromRecord(c, r), nil
+}
+
+// RemoteContext is used to configure object resolutions and transfers with
+// remote content stores and image providers.
+type RemoteContext struct {
+	// Resolver is used to resolve names to objects, fetchers, and pushers.
+	// If no resolver is provided, defaults to Docker registry resolver.
+	Resolver remotes.Resolver
+
+	// Unpack is done after an image is pulled to extract into a snapshotter.
+	// If an image is not unpacked on pull, it can be unpacked any time
+	// afterwards. Unpacking is required to run an image.
+	Unpack bool
+
+	// Snapshotter used for unpacking
+	Snapshotter string
+
+	// Labels to be applied to the created image
+	Labels map[string]string
+
+	// BaseHandlers are a set of handlers which get are called on dispatch.
+	// These handlers always get called before any operation specific
+	// handlers.
+	BaseHandlers []images.Handler
+
+	// ConvertSchema1 is whether to convert Docker registry schema 1
+	// manifests. If this option is false then any image which resolves
+	// to schema 1 will return an error since schema 1 is not supported.
+	ConvertSchema1 bool
+}
+
+func defaultRemoteContext() *RemoteContext {
+	return &RemoteContext{
+		Resolver: docker.NewResolver(docker.ResolverOptions{
+			Client: http.DefaultClient,
+		}),
+		Snapshotter: DefaultSnapshotter,
+	}
+}
+
+// Pull downloads the provided content into containerd's content store
+func (c *Client) Pull(ctx context.Context, ref string, opts ...RemoteOpt) (Image, error) {
+	pullCtx := defaultRemoteContext()
+	for _, o := range opts {
+		if err := o(c, pullCtx); err != nil {
+			return nil, err
+		}
+	}
+	store := c.ContentStore()
+
+	name, desc, err := pullCtx.Resolver.Resolve(ctx, ref)
+	if err != nil {
+		return nil, err
+	}
+	fetcher, err := pullCtx.Resolver.Fetcher(ctx, name)
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		schema1Converter *schema1.Converter
+		handler          images.Handler
+	)
+	if desc.MediaType == images.MediaTypeDockerSchema1Manifest && pullCtx.ConvertSchema1 {
+		schema1Converter = schema1.NewConverter(store, fetcher)
+		handler = images.Handlers(append(pullCtx.BaseHandlers, schema1Converter)...)
+	} else {
+		handler = images.Handlers(append(pullCtx.BaseHandlers,
+			remotes.FetchHandler(store, fetcher, desc),
+			images.ChildrenHandler(store, platforms.Default()))...,
+		)
+	}
+
+	if err := images.Dispatch(ctx, handler, desc); err != nil {
+		return nil, err
+	}
+	if schema1Converter != nil {
+		desc, err = schema1Converter.Convert(ctx)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	imgrec := images.Image{
+		Name:   name,
+		Target: desc,
+		Labels: pullCtx.Labels,
+	}
+
+	is := c.ImageService()
+	if created, err := is.Create(ctx, imgrec); err != nil {
+		if !errdefs.IsAlreadyExists(err) {
+			return nil, err
+		}
+
+		updated, err := is.Update(ctx, imgrec)
+		if err != nil {
+			return nil, err
+		}
+
+		imgrec = updated
+	} else {
+		imgrec = created
+	}
+
+	// Remove root tag from manifest now that image refers to it
+	if _, err := store.Update(ctx, content.Info{Digest: desc.Digest}, "labels.containerd.io/gc.root"); err != nil {
+		return nil, errors.Wrap(err, "failed to remove manifest root tag")
+	}
+
+	img := &image{
+		client: c,
+		i:      imgrec,
+	}
+	if pullCtx.Unpack {
+		if err := img.Unpack(ctx, pullCtx.Snapshotter); err != nil {
+			return nil, err
+		}
+	}
+	return img, nil
+}
+
+// Push uploads the provided content to a remote resource
+func (c *Client) Push(ctx context.Context, ref string, desc ocispec.Descriptor, opts ...RemoteOpt) error {
+	pushCtx := defaultRemoteContext()
+	for _, o := range opts {
+		if err := o(c, pushCtx); err != nil {
+			return err
+		}
+	}
+
+	pusher, err := pushCtx.Resolver.Pusher(ctx, ref)
+	if err != nil {
+		return err
+	}
+
+	var m sync.Mutex
+	manifestStack := []ocispec.Descriptor{}
+
+	filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
+			images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+			m.Lock()
+			manifestStack = append(manifestStack, desc)
+			m.Unlock()
+			return nil, images.ErrStopHandler
+		default:
+			return nil, nil
+		}
+	})
+
+	cs := c.ContentStore()
+	pushHandler := remotes.PushHandler(cs, pusher)
+
+	handlers := append(pushCtx.BaseHandlers,
+		images.ChildrenHandler(cs, platforms.Default()),
+		filterHandler,
+		pushHandler,
+	)
+
+	if err := images.Dispatch(ctx, images.Handlers(handlers...), desc); err != nil {
+		return err
+	}
+
+	// Iterate in reverse order as seen, parent always uploaded after child
+	for i := len(manifestStack) - 1; i >= 0; i-- {
+		_, err := pushHandler(ctx, manifestStack[i])
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// GetImage returns an existing image
+func (c *Client) GetImage(ctx context.Context, ref string) (Image, error) {
+	i, err := c.ImageService().Get(ctx, ref)
+	if err != nil {
+		return nil, err
+	}
+	return &image{
+		client: c,
+		i:      i,
+	}, nil
+}
+
+// ListImages returns all existing images
+func (c *Client) ListImages(ctx context.Context, filters ...string) ([]Image, error) {
+	imgs, err := c.ImageService().List(ctx, filters...)
+	if err != nil {
+		return nil, err
+	}
+	images := make([]Image, len(imgs))
+	for i, img := range imgs {
+		images[i] = &image{
+			client: c,
+			i:      img,
+		}
+	}
+	return images, nil
+}
+
+// Subscribe to events that match one or more of the provided filters.
+//
+// Callers should listen on both the envelope channel and errs channel. If the
+// errs channel returns nil or an error, the subscriber should terminate.
+//
+// To cancel shutdown reciept of events, cancel the provided context. The errs
+// channel will be closed and return a nil error.
+func (c *Client) Subscribe(ctx context.Context, filters ...string) (ch <-chan *eventsapi.Envelope, errs <-chan error) {
+	var (
+		evq  = make(chan *eventsapi.Envelope)
+		errq = make(chan error, 1)
+	)
+
+	errs = errq
+	ch = evq
+
+	session, err := c.EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{
+		Filters: filters,
+	})
+	if err != nil {
+		errq <- err
+		close(errq)
+		return
+	}
+
+	go func() {
+		defer close(errq)
+
+		for {
+			ev, err := session.Recv()
+			if err != nil {
+				errq <- err
+				return
+			}
+
+			select {
+			case evq <- ev:
+			case <-ctx.Done():
+				return
+			}
+		}
+	}()
+
+	return ch, errs
+}
+
+// Close closes the clients connection to containerd
+func (c *Client) Close() error {
+	return c.conn.Close()
+}
+
+// NamespaceService returns the underlying NamespacesClient
+func (c *Client) NamespaceService() namespacesapi.NamespacesClient {
+	return namespacesapi.NewNamespacesClient(c.conn)
+}
+
+// ContainerService returns the underlying container Store
+func (c *Client) ContainerService() containers.Store {
+	return NewRemoteContainerStore(containersapi.NewContainersClient(c.conn))
+}
+
+// ContentStore returns the underlying content Store
+func (c *Client) ContentStore() content.Store {
+	return contentservice.NewStoreFromClient(contentapi.NewContentClient(c.conn))
+}
+
+// SnapshotService returns the underlying snapshotter for the provided snapshotter name
+func (c *Client) SnapshotService(snapshotterName string) snapshot.Snapshotter {
+	return snapshotservice.NewSnapshotterFromClient(snapshotapi.NewSnapshotsClient(c.conn), snapshotterName)
+}
+
+// TaskService returns the underlying TasksClient
+func (c *Client) TaskService() tasks.TasksClient {
+	return tasks.NewTasksClient(c.conn)
+}
+
+// ImageService returns the underlying image Store
+func (c *Client) ImageService() images.Store {
+	return imagesservice.NewStoreFromClient(imagesapi.NewImagesClient(c.conn))
+}
+
+// DiffService returns the underlying Differ
+func (c *Client) DiffService() diff.Differ {
+	return diffservice.NewDiffServiceFromClient(diffapi.NewDiffClient(c.conn))
+}
+
+func (c *Client) IntrospectionService() introspectionapi.IntrospectionClient {
+	return introspectionapi.NewIntrospectionClient(c.conn)
+}
+
+// HealthService returns the underlying GRPC HealthClient
+func (c *Client) HealthService() grpc_health_v1.HealthClient {
+	return grpc_health_v1.NewHealthClient(c.conn)
+}
+
+// EventService returns the underlying EventsClient
+func (c *Client) EventService() eventsapi.EventsClient {
+	return eventsapi.NewEventsClient(c.conn)
+}
+
+// VersionService returns the underlying VersionClient
+func (c *Client) VersionService() versionservice.VersionClient {
+	return versionservice.NewVersionClient(c.conn)
+}
+
+// Version of containerd
+type Version struct {
+	// Version number
+	Version string
+	// Revision from git that was built
+	Revision string
+}
+
+// Version returns the version of containerd that the client is connected to
+func (c *Client) Version(ctx context.Context) (Version, error) {
+	response, err := c.VersionService().Version(ctx, &pempty.Empty{})
+	if err != nil {
+		return Version{}, err
+	}
+	return Version{
+		Version:  response.Version,
+		Revision: response.Revision,
+	}, nil
+}
+
+type imageFormat string
+
+const (
+	ociImageFormat imageFormat = "oci"
+)
+
+type importOpts struct {
+	format    imageFormat
+	refObject string
+	labels    map[string]string
+}
+
+// ImportOpt allows the caller to specify import specific options
+type ImportOpt func(c *importOpts) error
+
+// WithImportLabel sets a label to be associated with an imported image
+func WithImportLabel(key, value string) ImportOpt {
+	return func(opts *importOpts) error {
+		if opts.labels == nil {
+			opts.labels = make(map[string]string)
+		}
+
+		opts.labels[key] = value
+		return nil
+	}
+}
+
+// WithImportLabels associates a set of labels to an imported image
+func WithImportLabels(labels map[string]string) ImportOpt {
+	return func(opts *importOpts) error {
+		if opts.labels == nil {
+			opts.labels = make(map[string]string)
+		}
+
+		for k, v := range labels {
+			opts.labels[k] = v
+		}
+		return nil
+	}
+}
+
+// WithOCIImportFormat sets the import format for an OCI image format
+func WithOCIImportFormat() ImportOpt {
+	return func(c *importOpts) error {
+		if c.format != "" {
+			return errors.New("format already set")
+		}
+		c.format = ociImageFormat
+		return nil
+	}
+}
+
+// WithRefObject specifies the ref object to import.
+// If refObject is empty, it is copied from the ref argument of Import().
+func WithRefObject(refObject string) ImportOpt {
+	return func(c *importOpts) error {
+		c.refObject = refObject
+		return nil
+	}
+}
+
+func resolveImportOpt(ref string, opts ...ImportOpt) (importOpts, error) {
+	var iopts importOpts
+	for _, o := range opts {
+		if err := o(&iopts); err != nil {
+			return iopts, err
+		}
+	}
+	// use OCI as the default format
+	if iopts.format == "" {
+		iopts.format = ociImageFormat
+	}
+	// if refObject is not explicitly specified, use the one specified in ref
+	if iopts.refObject == "" {
+		refSpec, err := reference.Parse(ref)
+		if err != nil {
+			return iopts, err
+		}
+		iopts.refObject = refSpec.Object
+	}
+	return iopts, nil
+}
+
+// Import imports an image from a Tar stream using reader.
+// OCI format is assumed by default.
+//
+// Note that unreferenced blobs are imported to the content store as well.
+func (c *Client) Import(ctx context.Context, ref string, reader io.Reader, opts ...ImportOpt) (Image, error) {
+	iopts, err := resolveImportOpt(ref, opts...)
+	if err != nil {
+		return nil, err
+	}
+	switch iopts.format {
+	case ociImageFormat:
+		return c.importFromOCITar(ctx, ref, reader, iopts)
+	default:
+		return nil, errors.Errorf("unsupported format: %s", iopts.format)
+	}
+}
+
+type exportOpts struct {
+	format imageFormat
+}
+
+// ExportOpt allows callers to set export options
+type ExportOpt func(c *exportOpts) error
+
+// WithOCIExportFormat sets the OCI image format as the export target
+func WithOCIExportFormat() ExportOpt {
+	return func(c *exportOpts) error {
+		if c.format != "" {
+			return errors.New("format already set")
+		}
+		c.format = ociImageFormat
+		return nil
+	}
+}
+
+// TODO: add WithMediaTypeTranslation that transforms media types according to the format.
+// e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
+//      -> application/vnd.oci.image.layer.v1.tar+gzip
+
+// Export exports an image to a Tar stream.
+// OCI format is used by default.
+// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
+func (c *Client) Export(ctx context.Context, desc ocispec.Descriptor, opts ...ExportOpt) (io.ReadCloser, error) {
+	var eopts exportOpts
+	for _, o := range opts {
+		if err := o(&eopts); err != nil {
+			return nil, err
+		}
+	}
+	// use OCI as the default format
+	if eopts.format == "" {
+		eopts.format = ociImageFormat
+	}
+	pr, pw := io.Pipe()
+	switch eopts.format {
+	case ociImageFormat:
+		go func() {
+			pw.CloseWithError(c.exportToOCITar(ctx, desc, pw, eopts))
+		}()
+	default:
+		return nil, errors.Errorf("unsupported format: %s", eopts.format)
+	}
+	return pr, nil
+}
diff --git a/vendor/github.com/containerd/containerd/client_opts.go b/vendor/github.com/containerd/containerd/client_opts.go
new file mode 100644
index 0000000..c1e93ba
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/client_opts.go
@@ -0,0 +1,103 @@
+package containerd
+
+import (
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/remotes"
+	"google.golang.org/grpc"
+)
+
+type clientOpts struct {
+	defaultns   string
+	dialOptions []grpc.DialOption
+}
+
+// ClientOpt allows callers to set options on the containerd client
+type ClientOpt func(c *clientOpts) error
+
+// WithDefaultNamespace sets the default namespace on the client
+//
+// Any operation that does not have a namespace set on the context will
+// be provided the default namespace
+func WithDefaultNamespace(ns string) ClientOpt {
+	return func(c *clientOpts) error {
+		c.defaultns = ns
+		return nil
+	}
+}
+
+// WithDialOpts allows grpc.DialOptions to be set on the connection
+func WithDialOpts(opts []grpc.DialOption) ClientOpt {
+	return func(c *clientOpts) error {
+		c.dialOptions = opts
+		return nil
+	}
+}
+
+// RemoteOpt allows the caller to set distribution options for a remote
+type RemoteOpt func(*Client, *RemoteContext) error
+
+// WithPullUnpack is used to unpack an image after pull. This
+// uses the snapshotter, content store, and diff service
+// configured for the client.
+func WithPullUnpack(_ *Client, c *RemoteContext) error {
+	c.Unpack = true
+	return nil
+}
+
+// WithPullSnapshotter specifies snapshotter name used for unpacking
+func WithPullSnapshotter(snapshotterName string) RemoteOpt {
+	return func(_ *Client, c *RemoteContext) error {
+		c.Snapshotter = snapshotterName
+		return nil
+	}
+}
+
+// WithPullLabel sets a label to be associated with a pulled reference
+func WithPullLabel(key, value string) RemoteOpt {
+	return func(_ *Client, rc *RemoteContext) error {
+		if rc.Labels == nil {
+			rc.Labels = make(map[string]string)
+		}
+
+		rc.Labels[key] = value
+		return nil
+	}
+}
+
+// WithPullLabels associates a set of labels to a pulled reference
+func WithPullLabels(labels map[string]string) RemoteOpt {
+	return func(_ *Client, rc *RemoteContext) error {
+		if rc.Labels == nil {
+			rc.Labels = make(map[string]string)
+		}
+
+		for k, v := range labels {
+			rc.Labels[k] = v
+		}
+		return nil
+	}
+}
+
+// WithSchema1Conversion is used to convert Docker registry schema 1
+// manifests to oci manifests on pull. Without this option schema 1
+// manifests will return a not supported error.
+func WithSchema1Conversion(client *Client, c *RemoteContext) error {
+	c.ConvertSchema1 = true
+	return nil
+}
+
+// WithResolver specifies the resolver to use.
+func WithResolver(resolver remotes.Resolver) RemoteOpt {
+	return func(client *Client, c *RemoteContext) error {
+		c.Resolver = resolver
+		return nil
+	}
+}
+
+// WithImageHandler adds a base handler to be called on dispatch.
+func WithImageHandler(h images.Handler) RemoteOpt {
+	return func(client *Client, c *RemoteContext) error {
+		c.BaseHandlers = append(c.BaseHandlers, h)
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go
new file mode 100644
index 0000000..a9750ec
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/container.go
@@ -0,0 +1,309 @@
+package containerd
+
+import (
+	"context"
+	"encoding/json"
+	"path/filepath"
+	"strings"
+
+	"github.com/containerd/containerd/api/services/tasks/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/typeurl"
+	prototypes "github.com/gogo/protobuf/types"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+// Container is a metadata object for container resources and task creation
+type Container interface {
+	// ID identifies the container
+	ID() string
+	// Info returns the underlying container record type
+	Info(context.Context) (containers.Container, error)
+	// Delete removes the container
+	Delete(context.Context, ...DeleteOpts) error
+	// NewTask creates a new task based on the container metadata
+	NewTask(context.Context, IOCreation, ...NewTaskOpts) (Task, error)
+	// Spec returns the OCI runtime specification
+	Spec(context.Context) (*specs.Spec, error)
+	// Task returns the current task for the container
+	//
+	// If IOAttach options are passed the client will reattach to the IO for the running
+	// task. If no task exists for the container a NotFound error is returned
+	//
+	// Clients must make sure that only one reader is attached to the task and consuming
+	// the output from the task's fifos
+	Task(context.Context, IOAttach) (Task, error)
+	// Image returns the image that the container is based on
+	Image(context.Context) (Image, error)
+	// Labels returns the labels set on the container
+	Labels(context.Context) (map[string]string, error)
+	// SetLabels sets the provided labels for the container and returns the final label set
+	SetLabels(context.Context, map[string]string) (map[string]string, error)
+	// Extensions returns the extensions set on the container
+	Extensions(context.Context) (map[string]prototypes.Any, error)
+	// Update a container
+	Update(context.Context, ...UpdateContainerOpts) error
+}
+
+func containerFromRecord(client *Client, c containers.Container) *container {
+	return &container{
+		client: client,
+		id:     c.ID,
+	}
+}
+
+var _ = (Container)(&container{})
+
+type container struct {
+	client *Client
+	id     string
+}
+
+// ID returns the container's unique id
+func (c *container) ID() string {
+	return c.id
+}
+
+func (c *container) Info(ctx context.Context) (containers.Container, error) {
+	return c.get(ctx)
+}
+
+func (c *container) Extensions(ctx context.Context) (map[string]prototypes.Any, error) {
+	r, err := c.get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return r.Extensions, nil
+}
+
+func (c *container) Labels(ctx context.Context) (map[string]string, error) {
+	r, err := c.get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return r.Labels, nil
+}
+
+func (c *container) SetLabels(ctx context.Context, labels map[string]string) (map[string]string, error) {
+	container := containers.Container{
+		ID:     c.id,
+		Labels: labels,
+	}
+
+	var paths []string
+	// mask off paths so we only muck with the labels encountered in labels.
+	// Labels not in the passed in argument will be left alone.
+	for k := range labels {
+		paths = append(paths, strings.Join([]string{"labels", k}, "."))
+	}
+
+	r, err := c.client.ContainerService().Update(ctx, container, paths...)
+	if err != nil {
+		return nil, err
+	}
+	return r.Labels, nil
+}
+
+// Spec returns the current OCI specification for the container
+func (c *container) Spec(ctx context.Context) (*specs.Spec, error) {
+	r, err := c.get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	var s specs.Spec
+	if err := json.Unmarshal(r.Spec.Value, &s); err != nil {
+		return nil, err
+	}
+	return &s, nil
+}
+
+// Delete deletes an existing container
+// an error is returned if the container has running tasks
+func (c *container) Delete(ctx context.Context, opts ...DeleteOpts) error {
+	if _, err := c.loadTask(ctx, nil); err == nil {
+		return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot delete running task %v", c.id)
+	}
+	r, err := c.get(ctx)
+	if err != nil {
+		return err
+	}
+	for _, o := range opts {
+		if err := o(ctx, c.client, r); err != nil {
+			return err
+		}
+	}
+	return c.client.ContainerService().Delete(ctx, c.id)
+}
+
+func (c *container) Task(ctx context.Context, attach IOAttach) (Task, error) {
+	return c.loadTask(ctx, attach)
+}
+
+// Image returns the image that the container is based on
+func (c *container) Image(ctx context.Context) (Image, error) {
+	r, err := c.get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if r.Image == "" {
+		return nil, errors.Wrap(errdefs.ErrNotFound, "container not created from an image")
+	}
+	i, err := c.client.ImageService().Get(ctx, r.Image)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to get image %s for container", r.Image)
+	}
+	return &image{
+		client: c.client,
+		i:      i,
+	}, nil
+}
+
+func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...NewTaskOpts) (Task, error) {
+	i, err := ioCreate(c.id)
+	if err != nil {
+		return nil, err
+	}
+	cfg := i.Config()
+	request := &tasks.CreateTaskRequest{
+		ContainerID: c.id,
+		Terminal:    cfg.Terminal,
+		Stdin:       cfg.Stdin,
+		Stdout:      cfg.Stdout,
+		Stderr:      cfg.Stderr,
+	}
+	r, err := c.get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	if r.SnapshotKey != "" {
+		if r.Snapshotter == "" {
+			return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "unable to resolve rootfs mounts without snapshotter on container")
+		}
+
+		// get the rootfs from the snapshotter and add it to the request
+		mounts, err := c.client.SnapshotService(r.Snapshotter).Mounts(ctx, r.SnapshotKey)
+		if err != nil {
+			return nil, err
+		}
+		for _, m := range mounts {
+			request.Rootfs = append(request.Rootfs, &types.Mount{
+				Type:    m.Type,
+				Source:  m.Source,
+				Options: m.Options,
+			})
+		}
+	}
+	var info TaskInfo
+	for _, o := range opts {
+		if err := o(ctx, c.client, &info); err != nil {
+			return nil, err
+		}
+	}
+	if info.RootFS != nil {
+		for _, m := range info.RootFS {
+			request.Rootfs = append(request.Rootfs, &types.Mount{
+				Type:    m.Type,
+				Source:  m.Source,
+				Options: m.Options,
+			})
+		}
+	}
+	if info.Options != nil {
+		any, err := typeurl.MarshalAny(info.Options)
+		if err != nil {
+			return nil, err
+		}
+		request.Options = any
+	}
+	t := &task{
+		client: c.client,
+		io:     i,
+		id:     c.id,
+	}
+	if info.Checkpoint != nil {
+		request.Checkpoint = info.Checkpoint
+	}
+	response, err := c.client.TaskService().Create(ctx, request)
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	t.pid = response.Pid
+	return t, nil
+}
+
+func (c *container) Update(ctx context.Context, opts ...UpdateContainerOpts) error {
+	// fetch the current container config before updating it
+	r, err := c.get(ctx)
+	if err != nil {
+		return err
+	}
+	for _, o := range opts {
+		if err := o(ctx, c.client, &r); err != nil {
+			return err
+		}
+	}
+	if _, err := c.client.ContainerService().Update(ctx, r); err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	return nil
+}
+
+func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, error) {
+	response, err := c.client.TaskService().Get(ctx, &tasks.GetRequest{
+		ContainerID: c.id,
+	})
+	if err != nil {
+		err = errdefs.FromGRPC(err)
+		if errdefs.IsNotFound(err) {
+			return nil, errors.Wrapf(err, "no running task found")
+		}
+		return nil, err
+	}
+	var i IO
+	if ioAttach != nil {
+		if i, err = attachExistingIO(response, ioAttach); err != nil {
+			return nil, err
+		}
+	}
+	t := &task{
+		client: c.client,
+		io:     i,
+		id:     response.Process.ID,
+		pid:    response.Process.Pid,
+	}
+	return t, nil
+}
+
+func (c *container) get(ctx context.Context) (containers.Container, error) {
+	return c.client.ContainerService().Get(ctx, c.id)
+}
+
+func attachExistingIO(response *tasks.GetResponse, ioAttach IOAttach) (IO, error) {
+	// get the existing fifo paths from the task information stored by the daemon
+	paths := &FIFOSet{
+		Dir: getFifoDir([]string{
+			response.Process.Stdin,
+			response.Process.Stdout,
+			response.Process.Stderr,
+		}),
+		In:       response.Process.Stdin,
+		Out:      response.Process.Stdout,
+		Err:      response.Process.Stderr,
+		Terminal: response.Process.Terminal,
+	}
+	return ioAttach(paths)
+}
+
+// getFifoDir looks for any non-empty path for a stdio fifo
+// and returns the dir for where it is located
+func getFifoDir(paths []string) string {
+	for _, p := range paths {
+		if p != "" {
+			return filepath.Dir(p)
+		}
+	}
+	return ""
+}
diff --git a/vendor/github.com/containerd/containerd/container_opts.go b/vendor/github.com/containerd/containerd/container_opts.go
new file mode 100644
index 0000000..57ffc0a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/container_opts.go
@@ -0,0 +1,174 @@
+package containerd
+
+import (
+	"context"
+	"time"
+
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/containerd/snapshot"
+	"github.com/containerd/typeurl"
+	"github.com/gogo/protobuf/types"
+	"github.com/opencontainers/image-spec/identity"
+	"github.com/pkg/errors"
+)
+
+// DeleteOpts allows the caller to set options for the deletion of a container
+type DeleteOpts func(ctx context.Context, client *Client, c containers.Container) error
+
+// NewContainerOpts allows the caller to set additional options when creating a container
+type NewContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error
+
+// UpdateContainerOpts allows the caller to set additional options when updating a container
+type UpdateContainerOpts func(ctx context.Context, client *Client, c *containers.Container) error
+
+// WithRuntime allows a user to specify the runtime name and additional options that should
+// be used to create tasks for the container
+func WithRuntime(name string, options interface{}) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		var (
+			any *types.Any
+			err error
+		)
+		if options != nil {
+			any, err = typeurl.MarshalAny(options)
+			if err != nil {
+				return err
+			}
+		}
+		c.Runtime = containers.RuntimeInfo{
+			Name:    name,
+			Options: any,
+		}
+		return nil
+	}
+}
+
+// WithImage sets the provided image as the base for the container
+func WithImage(i Image) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		c.Image = i.Name()
+		return nil
+	}
+}
+
+// WithContainerLabels adds the provided labels to the container
+func WithContainerLabels(labels map[string]string) NewContainerOpts {
+	return func(_ context.Context, _ *Client, c *containers.Container) error {
+		c.Labels = labels
+		return nil
+	}
+}
+
+// WithSnapshotter sets the provided snapshotter for use by the container
+//
+// This option must appear before other snapshotter options to have an effect.
+func WithSnapshotter(name string) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		c.Snapshotter = name
+		return nil
+	}
+}
+
+// WithSnapshot uses an existing root filesystem for the container
+func WithSnapshot(id string) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		setSnapshotterIfEmpty(c)
+		// check that the snapshot exists, if not, fail on creation
+		if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
+			return err
+		}
+		c.SnapshotKey = id
+		return nil
+	}
+}
+
+// WithNewSnapshot allocates a new snapshot to be used by the container as the
+// root filesystem in read-write mode
+func WithNewSnapshot(id string, i Image) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
+		if err != nil {
+			return err
+		}
+		setSnapshotterIfEmpty(c)
+		labels := map[string]string{
+			"containerd.io/gc.root": time.Now().String(),
+		}
+		parent := identity.ChainID(diffIDs).String()
+		if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, parent, snapshot.WithLabels(labels)); err != nil {
+			return err
+		}
+		c.SnapshotKey = id
+		c.Image = i.Name()
+		return nil
+	}
+}
+
+// WithSnapshotCleanup deletes the rootfs snapshot allocated for the container
+func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error {
+	if c.SnapshotKey != "" {
+		if c.Snapshotter == "" {
+			return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot")
+		}
+		return client.SnapshotService(c.Snapshotter).Remove(ctx, c.SnapshotKey)
+	}
+	return nil
+}
+
+// WithNewSnapshotView allocates a new snapshot to be used by the container as the
+// root filesystem in read-only mode
+func WithNewSnapshotView(id string, i Image) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
+		if err != nil {
+			return err
+		}
+		setSnapshotterIfEmpty(c)
+		labels := map[string]string{
+			"containerd.io/gc.root": time.Now().String(),
+		}
+		parent := identity.ChainID(diffIDs).String()
+		if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, parent, snapshot.WithLabels(labels)); err != nil {
+			return err
+		}
+		c.SnapshotKey = id
+		c.Image = i.Name()
+		return nil
+	}
+}
+
+func setSnapshotterIfEmpty(c *containers.Container) {
+	if c.Snapshotter == "" {
+		c.Snapshotter = DefaultSnapshotter
+	}
+}
+
+// WithContainerExtension appends extension data to the container object.
+// Use this to decorate the container object with additional data for the client
+// integration.
+//
+// Make sure to register the type of `extension` in the typeurl package via
+// `typeurl.Register` or container creation may fail.
+func WithContainerExtension(name string, extension interface{}) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		if name == "" {
+			return errors.Wrapf(errdefs.ErrInvalidArgument, "extension key must not be zero-length")
+		}
+
+		any, err := typeurl.MarshalAny(extension)
+		if err != nil {
+			if errors.Cause(err) == typeurl.ErrNotFound {
+				return errors.Wrapf(err, "extension %q is not registered with the typeurl package, see `typeurl.Register`", name)
+			}
+			return errors.Wrap(err, "error marshalling extension")
+		}
+
+		if c.Extensions == nil {
+			c.Extensions = make(map[string]types.Any)
+		}
+		c.Extensions[name] = *any
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/container_opts_unix.go b/vendor/github.com/containerd/containerd/container_opts_unix.go
new file mode 100644
index 0000000..e2cd7c9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/container_opts_unix.go
@@ -0,0 +1,124 @@
+// +build !windows
+
+package containerd
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	"github.com/gogo/protobuf/proto"
+	protobuf "github.com/gogo/protobuf/types"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/identity"
+	"github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// WithCheckpoint allows a container to be created from the checkpointed information
+// provided by the descriptor. The image, snapshot, and runtime specifications are
+// restored on the container
+func WithCheckpoint(im Image, snapshotKey string) NewContainerOpts {
+	// set image and rw, and spec
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		var (
+			desc  = im.Target()
+			id    = desc.Digest
+			store = client.ContentStore()
+		)
+		index, err := decodeIndex(ctx, store, id)
+		if err != nil {
+			return err
+		}
+		var rw *v1.Descriptor
+		for _, m := range index.Manifests {
+			switch m.MediaType {
+			case v1.MediaTypeImageLayer:
+				fk := m
+				rw = &fk
+			case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList:
+				config, err := images.Config(ctx, store, m, platforms.Default())
+				if err != nil {
+					return errors.Wrap(err, "unable to resolve image config")
+				}
+				diffIDs, err := images.RootFS(ctx, store, config)
+				if err != nil {
+					return errors.Wrap(err, "unable to get rootfs")
+				}
+				setSnapshotterIfEmpty(c)
+				if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, snapshotKey, identity.ChainID(diffIDs).String()); err != nil {
+					if !errdefs.IsAlreadyExists(err) {
+						return err
+					}
+				}
+				c.Image = index.Annotations["image.name"]
+			case images.MediaTypeContainerd1CheckpointConfig:
+				data, err := content.ReadBlob(ctx, store, m.Digest)
+				if err != nil {
+					return errors.Wrap(err, "unable to read checkpoint config")
+				}
+				var any protobuf.Any
+				if err := proto.Unmarshal(data, &any); err != nil {
+					return err
+				}
+				c.Spec = &any
+			}
+		}
+		if rw != nil {
+			// apply the rw snapshot to the new rw layer
+			mounts, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, snapshotKey)
+			if err != nil {
+				return errors.Wrapf(err, "unable to get mounts for %s", snapshotKey)
+			}
+			if _, err := client.DiffService().Apply(ctx, *rw, mounts); err != nil {
+				return errors.Wrap(err, "unable to apply rw diff")
+			}
+		}
+		c.SnapshotKey = snapshotKey
+		return nil
+	}
+}
+
+// WithTaskCheckpoint allows a task to be created with live runtime and memory data from a
+// previous checkpoint. Additional software such as CRIU may be required to
+// restore a task from a checkpoint
+func WithTaskCheckpoint(im Image) NewTaskOpts {
+	return func(ctx context.Context, c *Client, info *TaskInfo) error {
+		desc := im.Target()
+		id := desc.Digest
+		index, err := decodeIndex(ctx, c.ContentStore(), id)
+		if err != nil {
+			return err
+		}
+		for _, m := range index.Manifests {
+			if m.MediaType == images.MediaTypeContainerd1Checkpoint {
+				info.Checkpoint = &types.Descriptor{
+					MediaType: m.MediaType,
+					Size_:     m.Size,
+					Digest:    m.Digest,
+				}
+				return nil
+			}
+		}
+		return fmt.Errorf("checkpoint not found in index %s", id)
+	}
+}
+
+func decodeIndex(ctx context.Context, store content.Store, id digest.Digest) (*v1.Index, error) {
+	var index v1.Index
+	p, err := content.ReadBlob(ctx, store, id)
+	if err != nil {
+		return nil, err
+	}
+	if err := json.Unmarshal(p, &index); err != nil {
+		return nil, err
+	}
+
+	return &index, nil
+}
diff --git a/vendor/github.com/containerd/containerd/containers/containers.go b/vendor/github.com/containerd/containerd/containers/containers.go
new file mode 100644
index 0000000..df4ad83
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/containers/containers.go
@@ -0,0 +1,92 @@
+package containers
+
+import (
+	"context"
+	"time"
+
+	"github.com/gogo/protobuf/types"
+)
+
+// Container represents the set of data pinned by a container. Unless otherwise
+// noted, the resources here are considered in use by the container.
+//
+// The resources specified in this object are used to create tasks from the container.
+type Container struct {
+	// ID uniquely identifies the container in a namespace.
+	//
+	// This property is required and cannot be changed after creation.
+	ID string
+
+	// Labels provide metadata extension for a container.
+	//
+	// These are optional and fully mutable.
+	Labels map[string]string
+
+	// Image specifies the image reference used for a container.
+	//
+	// This property is optional but immutable.
+	Image string
+
+	// Runtime specifies which runtime should be used when launching container
+	// tasks.
+	//
+	// This property is required and immutable.
+	Runtime RuntimeInfo
+
+	// Spec should carry the runtime specification used to implement the
+	// container.
+	//
+	// This field is required but mutable.
+	Spec *types.Any
+
+	// SnapshotKey specifies the snapshot key to use for the container's root
+	// filesystem. When starting a task from this container, a caller should
+	// look up the mounts from the snapshot service and include those on the
+	// task create request.
+	//
+	// This field is not required but immutable.
+	SnapshotKey string
+
+	// Snapshotter specifies the snapshotter name used for rootfs
+	//
+	// This field is not required but immutable.
+	Snapshotter string
+
+	// CreatedAt is the time at which the container was created.
+	CreatedAt time.Time
+
+	// UpdatedAt is the time at which the container was updated.
+	UpdatedAt time.Time
+
+	// Extensions stores client-specified metadata
+	Extensions map[string]types.Any
+}
+
+// RuntimeInfo holds runtime specific information
+type RuntimeInfo struct {
+	Name    string
+	Options *types.Any
+}
+
+// Store interacts with the underlying container storage
+type Store interface {
+	Get(ctx context.Context, id string) (Container, error)
+
+	// List returns containers that match one or more of the provided filters.
+	List(ctx context.Context, filters ...string) ([]Container, error)
+
+	// Create a container in the store from the provided container.
+	Create(ctx context.Context, container Container) (Container, error)
+
+	// Update the container with the provided container object. ID must be set.
+	//
+	// If one or more fieldpaths are provided, only the field corresponding to
+	// the fieldpaths will be mutated.
+	Update(ctx context.Context, container Container, fieldpaths ...string) (Container, error)
+
+	// Delete a container using the id.
+	//
+	// nil will be returned on success. If the container is not known to the
+	// store, ErrNotFound will be returned.
+	Delete(ctx context.Context, id string) error
+}
diff --git a/vendor/github.com/containerd/containerd/containerstore.go b/vendor/github.com/containerd/containerd/containerstore.go
new file mode 100644
index 0000000..4db2350
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/containerstore.go
@@ -0,0 +1,135 @@
+package containerd
+
+import (
+	"context"
+
+	containersapi "github.com/containerd/containerd/api/services/containers/v1"
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/errdefs"
+	ptypes "github.com/gogo/protobuf/types"
+)
+
+type remoteContainers struct {
+	client containersapi.ContainersClient
+}
+
+var _ containers.Store = &remoteContainers{}
+
+// NewRemoteContainerStore returns the container Store connected with the provided client
+func NewRemoteContainerStore(client containersapi.ContainersClient) containers.Store {
+	return &remoteContainers{
+		client: client,
+	}
+}
+
+func (r *remoteContainers) Get(ctx context.Context, id string) (containers.Container, error) {
+	resp, err := r.client.Get(ctx, &containersapi.GetContainerRequest{
+		ID: id,
+	})
+	if err != nil {
+		return containers.Container{}, errdefs.FromGRPC(err)
+	}
+
+	return containerFromProto(&resp.Container), nil
+}
+
+func (r *remoteContainers) List(ctx context.Context, filters ...string) ([]containers.Container, error) {
+	resp, err := r.client.List(ctx, &containersapi.ListContainersRequest{
+		Filters: filters,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+
+	return containersFromProto(resp.Containers), nil
+
+}
+
+func (r *remoteContainers) Create(ctx context.Context, container containers.Container) (containers.Container, error) {
+	created, err := r.client.Create(ctx, &containersapi.CreateContainerRequest{
+		Container: containerToProto(&container),
+	})
+	if err != nil {
+		return containers.Container{}, errdefs.FromGRPC(err)
+	}
+
+	return containerFromProto(&created.Container), nil
+
+}
+
+func (r *remoteContainers) Update(ctx context.Context, container containers.Container, fieldpaths ...string) (containers.Container, error) {
+	var updateMask *ptypes.FieldMask
+	if len(fieldpaths) > 0 {
+		updateMask = &ptypes.FieldMask{
+			Paths: fieldpaths,
+		}
+	}
+
+	updated, err := r.client.Update(ctx, &containersapi.UpdateContainerRequest{
+		Container:  containerToProto(&container),
+		UpdateMask: updateMask,
+	})
+	if err != nil {
+		return containers.Container{}, errdefs.FromGRPC(err)
+	}
+
+	return containerFromProto(&updated.Container), nil
+
+}
+
+func (r *remoteContainers) Delete(ctx context.Context, id string) error {
+	_, err := r.client.Delete(ctx, &containersapi.DeleteContainerRequest{
+		ID: id,
+	})
+
+	return errdefs.FromGRPC(err)
+
+}
+
+func containerToProto(container *containers.Container) containersapi.Container {
+	return containersapi.Container{
+		ID:     container.ID,
+		Labels: container.Labels,
+		Image:  container.Image,
+		Runtime: &containersapi.Container_Runtime{
+			Name:    container.Runtime.Name,
+			Options: container.Runtime.Options,
+		},
+		Spec:        container.Spec,
+		Snapshotter: container.Snapshotter,
+		SnapshotKey: container.SnapshotKey,
+		Extensions:  container.Extensions,
+	}
+}
+
+func containerFromProto(containerpb *containersapi.Container) containers.Container {
+	var runtime containers.RuntimeInfo
+	if containerpb.Runtime != nil {
+		runtime = containers.RuntimeInfo{
+			Name:    containerpb.Runtime.Name,
+			Options: containerpb.Runtime.Options,
+		}
+	}
+	return containers.Container{
+		ID:          containerpb.ID,
+		Labels:      containerpb.Labels,
+		Image:       containerpb.Image,
+		Runtime:     runtime,
+		Spec:        containerpb.Spec,
+		Snapshotter: containerpb.Snapshotter,
+		SnapshotKey: containerpb.SnapshotKey,
+		CreatedAt:   containerpb.CreatedAt,
+		UpdatedAt:   containerpb.UpdatedAt,
+		Extensions:  containerpb.Extensions,
+	}
+}
+
+func containersFromProto(containerspb []containersapi.Container) []containers.Container {
+	var containers []containers.Container
+
+	for _, container := range containerspb {
+		containers = append(containers, containerFromProto(&container))
+	}
+
+	return containers
+}
diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go
new file mode 100644
index 0000000..05fd4ae
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/content.go
@@ -0,0 +1,128 @@
+package content
+
+import (
+	"context"
+	"io"
+	"time"
+
+	"github.com/opencontainers/go-digest"
+)
+
+// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer
+type ReaderAt interface {
+	io.ReaderAt
+	io.Closer
+	Size() int64
+}
+
+// Provider provides a reader interface for specific content
+type Provider interface {
+	ReaderAt(ctx context.Context, dgst digest.Digest) (ReaderAt, error)
+}
+
+// Ingester writes content
+type Ingester interface {
+	Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (Writer, error)
+}
+
+// Info holds content specific information
+//
+// TODO(stevvooe): Consider a very different name for this struct. Info is way
+// to general. It also reads very weird in certain context, like pluralization.
+type Info struct {
+	Digest    digest.Digest
+	Size      int64
+	CreatedAt time.Time
+	UpdatedAt time.Time
+	Labels    map[string]string
+}
+
+// Status of a content operation
+type Status struct {
+	Ref       string
+	Offset    int64
+	Total     int64
+	Expected  digest.Digest
+	StartedAt time.Time
+	UpdatedAt time.Time
+}
+
+// WalkFunc defines the callback for a blob walk.
+type WalkFunc func(Info) error
+
+// Manager provides methods for inspecting, listing and removing content.
+type Manager interface {
+	// Info will return metadata about content available in the content store.
+	//
+	// If the content is not present, ErrNotFound will be returned.
+	Info(ctx context.Context, dgst digest.Digest) (Info, error)
+
+	// Update updates mutable information related to content.
+	// If one or more fieldpaths are provided, only those
+	// fields will be updated.
+	// Mutable fields:
+	//  labels.*
+	Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error)
+
+	// Walk will call fn for each item in the content store which
+	// match the provided filters. If no filters are given all
+	// items will be walked.
+	Walk(ctx context.Context, fn WalkFunc, filters ...string) error
+
+	// Delete removes the content from the store.
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// IngestManager provides methods for managing ingests.
+type IngestManager interface {
+	// Status returns the status of the provided ref.
+	Status(ctx context.Context, ref string) (Status, error)
+
+	// ListStatuses returns the status of any active ingestions whose ref match the
+	// provided regular expression. If empty, all active ingestions will be
+	// returned.
+	ListStatuses(ctx context.Context, filters ...string) ([]Status, error)
+
+	// Abort completely cancels the ingest operation targeted by ref.
+	Abort(ctx context.Context, ref string) error
+}
+
+// Writer handles the write of content into a content store
+type Writer interface {
+	// Close is expected to be called after Commit() when commission is needed.
+	// Closing a writer without commit allows resuming or aborting.
+	io.WriteCloser
+
+	// Digest may return empty digest or panics until committed.
+	Digest() digest.Digest
+
+	// Commit commits the blob (but no roll-back is guaranteed on an error).
+	// size and expected can be zero-value when unknown.
+	Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error
+
+	// Status returns the current state of write
+	Status() (Status, error)
+
+	// Truncate updates the size of the target blob
+	Truncate(size int64) error
+}
+
+// Store combines the methods of content-oriented interfaces into a set that
+// are commonly provided by complete implementations.
+type Store interface {
+	Manager
+	Provider
+	IngestManager
+	Ingester
+}
+
+// Opt is used to alter the mutable properties of content
+type Opt func(*Info) error
+
+// WithLabels allows labels to be set on content
+func WithLabels(labels map[string]string) Opt {
+	return func(info *Info) error {
+		info.Labels = labels
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go
new file mode 100644
index 0000000..32efc6c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/helpers.go
@@ -0,0 +1,140 @@
+package content
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+var (
+	bufPool = sync.Pool{
+		New: func() interface{} {
+			return make([]byte, 1<<20)
+		},
+	}
+)
+
+// NewReader returns a io.Reader from a ReaderAt
+func NewReader(ra ReaderAt) io.Reader {
+	rd := io.NewSectionReader(ra, 0, ra.Size())
+	return rd
+}
+
+// ReadBlob retrieves the entire contents of the blob from the provider.
+//
+// Avoid using this for large blobs, such as layers.
+func ReadBlob(ctx context.Context, provider Provider, dgst digest.Digest) ([]byte, error) {
+	ra, err := provider.ReaderAt(ctx, dgst)
+	if err != nil {
+		return nil, err
+	}
+	defer ra.Close()
+
+	p := make([]byte, ra.Size())
+
+	_, err = ra.ReadAt(p, 0)
+	return p, err
+}
+
+// WriteBlob writes data with the expected digest into the content store. If
+// expected already exists, the method returns immediately and the reader will
+// not be consumed.
+//
+// This is useful when the digest and size are known beforehand.
+//
+// Copy is buffered, so no need to wrap reader in buffered io.
+func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
+	cw, err := cs.Writer(ctx, ref, size, expected)
+	if err != nil {
+		if !errdefs.IsAlreadyExists(err) {
+			return err
+		}
+
+		return nil // already present
+	}
+	defer cw.Close()
+
+	return Copy(ctx, cw, r, size, expected, opts...)
+}
+
+// Copy copies data with the expected digest from the reader into the
+// provided content store writer.
+//
+// This is useful when the digest and size are known beforehand. When
+// the size or digest is unknown, these values may be empty.
+//
+// Copy is buffered, so no need to wrap reader in buffered io.
+func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
+	ws, err := cw.Status()
+	if err != nil {
+		return err
+	}
+
+	if ws.Offset > 0 {
+		r, err = seekReader(r, ws.Offset, size)
+		if err != nil {
+			if !isUnseekable(err) {
+				return errors.Wrapf(err, "unabled to resume write to %v", ws.Ref)
+			}
+
+			// reader is unseekable, try to move the writer back to the start.
+			if err := cw.Truncate(0); err != nil {
+				return errors.Wrapf(err, "content writer truncate failed")
+			}
+		}
+	}
+
+	buf := bufPool.Get().([]byte)
+	defer bufPool.Put(buf)
+
+	if _, err := io.CopyBuffer(cw, r, buf); err != nil {
+		return err
+	}
+
+	if err := cw.Commit(ctx, size, expected, opts...); err != nil {
+		if !errdefs.IsAlreadyExists(err) {
+			return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
+		}
+	}
+
+	return nil
+}
+
+var errUnseekable = errors.New("seek not supported")
+
+func isUnseekable(err error) bool {
+	return errors.Cause(err) == errUnseekable
+}
+
+// seekReader attempts to seek the reader to the given offset, either by
+// resolving `io.Seeker` or by detecting `io.ReaderAt`.
+func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
+	// attempt to resolve r as a seeker and setup the offset.
+	seeker, ok := r.(io.Seeker)
+	if ok {
+		nn, err := seeker.Seek(offset, io.SeekStart)
+		if nn != offset {
+			return nil, fmt.Errorf("failed to seek to offset %v", offset)
+		}
+
+		if err != nil {
+			return nil, err
+		}
+
+		return r, nil
+	}
+
+	// ok, let's try io.ReaderAt!
+	readerAt, ok := r.(io.ReaderAt)
+	if ok && size > offset {
+		sr := io.NewSectionReader(readerAt, offset, size)
+		return sr, nil
+	}
+
+	return r, errors.Wrapf(errUnseekable, "seek to offset %v failed", offset)
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/locks.go b/vendor/github.com/containerd/containerd/content/local/locks.go
new file mode 100644
index 0000000..cf5d0c5
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/locks.go
@@ -0,0 +1,38 @@
+package local
+
+import (
+	"sync"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+)
+
+// Handles locking references
+// TODO: use boltdb for lock status
+
+var (
+	// locks lets us lock in process
+	locks   = map[string]struct{}{}
+	locksMu sync.Mutex
+)
+
+func tryLock(ref string) error {
+	locksMu.Lock()
+	defer locksMu.Unlock()
+
+	if _, ok := locks[ref]; ok {
+		return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked", ref)
+	}
+
+	locks[ref] = struct{}{}
+	return nil
+}
+
+func unlock(ref string) {
+	locksMu.Lock()
+	defer locksMu.Unlock()
+
+	if _, ok := locks[ref]; ok {
+		delete(locks, ref)
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/readerat.go b/vendor/github.com/containerd/containerd/content/local/readerat.go
new file mode 100644
index 0000000..ae1af5d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/readerat.go
@@ -0,0 +1,24 @@
+package local
+
+import (
+	"os"
+)
+
+// readerat implements io.ReaderAt in a completely stateless manner by opening
+// the referenced file for each call to ReadAt.
+type sizeReaderAt struct {
+	size int64
+	fp   *os.File
+}
+
+func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) {
+	return ra.fp.ReadAt(p, offset)
+}
+
+func (ra sizeReaderAt) Size() int64 {
+	return ra.size
+}
+
+func (ra sizeReaderAt) Close() error {
+	return ra.fp.Close()
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go
new file mode 100644
index 0000000..c7854cf
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/store.go
@@ -0,0 +1,414 @@
+package local
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/filters"
+	"github.com/containerd/containerd/log"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+var (
+	bufPool = sync.Pool{
+		New: func() interface{} {
+			return make([]byte, 1<<20)
+		},
+	}
+)
+
+// Store is digest-keyed store for content. All data written into the store is
+// stored under a verifiable digest.
+//
+// Store can generally support multi-reader, single-writer ingest of data,
+// including resumable ingest.
+type store struct {
+	root string
+}
+
+// NewStore returns a local content store
+func NewStore(root string) (content.Store, error) {
+	if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) {
+		return nil, err
+	}
+
+	return &store{
+		root: root,
+	}, nil
+}
+
+func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
+	p := s.blobPath(dgst)
+	fi, err := os.Stat(p)
+	if err != nil {
+		if os.IsNotExist(err) {
+			err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
+		}
+
+		return content.Info{}, err
+	}
+
+	return s.info(dgst, fi), nil
+}
+
+func (s *store) info(dgst digest.Digest, fi os.FileInfo) content.Info {
+	return content.Info{
+		Digest:    dgst,
+		Size:      fi.Size(),
+		CreatedAt: fi.ModTime(),
+		UpdatedAt: fi.ModTime(),
+	}
+}
+
+// ReaderAt returns an io.ReaderAt for the blob.
+func (s *store) ReaderAt(ctx context.Context, dgst digest.Digest) (content.ReaderAt, error) {
+	p := s.blobPath(dgst)
+	fi, err := os.Stat(p)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return nil, err
+		}
+
+		return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", dgst, p)
+	}
+
+	fp, err := os.Open(p)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return nil, err
+		}
+
+		return nil, errors.Wrapf(errdefs.ErrNotFound, "blob %s expected at %s", dgst, p)
+	}
+
+	return sizeReaderAt{size: fi.Size(), fp: fp}, nil
+}
+
+// Delete removes a blob by its digest.
+//
+// While this is safe to do concurrently, safe exist-removal logic must hold
+// some global lock on the store.
+func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
+	if err := os.RemoveAll(s.blobPath(dgst)); err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+
+		return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
+	}
+
+	return nil
+}
+
+func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
+	// TODO: Support persisting and updating mutable content data
+	return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store")
+}
+
+func (s *store) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
+	// TODO: Support filters
+	root := filepath.Join(s.root, "blobs")
+	var alg digest.Algorithm
+	return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		if !fi.IsDir() && !alg.Available() {
+			return nil
+		}
+
+		// TODO(stevvooe): There are few more cases with subdirs that should be
+		// handled in case the layout gets corrupted. This isn't strict enough
+		// and may spew bad data.
+
+		if path == root {
+			return nil
+		}
+		if filepath.Dir(path) == root {
+			alg = digest.Algorithm(filepath.Base(path))
+
+			if !alg.Available() {
+				alg = ""
+				return filepath.SkipDir
+			}
+
+			// descending into a hash directory
+			return nil
+		}
+
+		dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
+		if err := dgst.Validate(); err != nil {
+			// log error but don't report
+			log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
+			// if we see this, it could mean some sort of corruption of the
+			// store or extra paths not expected previously.
+		}
+
+		return fn(s.info(dgst, fi))
+	})
+}
+
+func (s *store) Status(ctx context.Context, ref string) (content.Status, error) {
+	return s.status(s.ingestRoot(ref))
+}
+
+func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
+	fp, err := os.Open(filepath.Join(s.root, "ingest"))
+	if err != nil {
+		return nil, err
+	}
+
+	defer fp.Close()
+
+	fis, err := fp.Readdir(-1)
+	if err != nil {
+		return nil, err
+	}
+
+	filter, err := filters.ParseAll(fs...)
+	if err != nil {
+		return nil, err
+	}
+
+	var active []content.Status
+	for _, fi := range fis {
+		p := filepath.Join(s.root, "ingest", fi.Name())
+		stat, err := s.status(p)
+		if err != nil {
+			if !os.IsNotExist(err) {
+				return nil, err
+			}
+
+			// TODO(stevvooe): This is a common error if uploads are being
+			// completed while making this listing. Need to consider taking a
+			// lock on the whole store to coordinate this aspect.
+			//
+			// Another option is to cleanup downloads asynchronously and
+			// coordinate this method with the cleanup process.
+			//
+			// For now, we just skip them, as they really don't exist.
+			continue
+		}
+
+		if filter.Match(adaptStatus(stat)) {
+			active = append(active, stat)
+		}
+	}
+
+	return active, nil
+}
+
+// status works like stat above except uses the path to the ingest.
+func (s *store) status(ingestPath string) (content.Status, error) {
+	dp := filepath.Join(ingestPath, "data")
+	fi, err := os.Stat(dp)
+	if err != nil {
+		if os.IsNotExist(err) {
+			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+		}
+		return content.Status{}, err
+	}
+
+	ref, err := readFileString(filepath.Join(ingestPath, "ref"))
+	if err != nil {
+		if os.IsNotExist(err) {
+			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+		}
+		return content.Status{}, err
+	}
+
+	return content.Status{
+		Ref:       ref,
+		Offset:    fi.Size(),
+		Total:     s.total(ingestPath),
+		UpdatedAt: fi.ModTime(),
+		StartedAt: getStartTime(fi),
+	}, nil
+}
+
+func adaptStatus(status content.Status) filters.Adaptor {
+	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+		if len(fieldpath) == 0 {
+			return "", false
+		}
+		switch fieldpath[0] {
+		case "ref":
+			return status.Ref, true
+		}
+
+		return "", false
+	})
+}
+
+// total attempts to resolve the total expected size for the write.
+func (s *store) total(ingestPath string) int64 {
+	totalS, err := readFileString(filepath.Join(ingestPath, "total"))
+	if err != nil {
+		return 0
+	}
+
+	total, err := strconv.ParseInt(totalS, 10, 64)
+	if err != nil {
+		// represents a corrupted file, should probably remove.
+		return 0
+	}
+
+	return total
+}
+
+// Writer begins or resumes the active writer identified by ref. If the writer
+// is already in use, an error is returned. Only one writer may be in use per
+// ref at a time.
+//
+// The argument `ref` is used to uniquely identify a long-lived writer transaction.
+func (s *store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) {
+	// TODO(stevvooe): Need to actually store expected here. We have
+	// code in the service that shouldn't be dealing with this.
+	if expected != "" {
+		p := s.blobPath(expected)
+		if _, err := os.Stat(p); err == nil {
+			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected)
+		}
+	}
+
+	path, refp, data := s.ingestPaths(ref)
+
+	if err := tryLock(ref); err != nil {
+		return nil, errors.Wrapf(err, "locking ref %v failed", ref)
+	}
+
+	var (
+		digester  = digest.Canonical.Digester()
+		offset    int64
+		startedAt time.Time
+		updatedAt time.Time
+	)
+
+	// ensure that the ingest path has been created.
+	if err := os.Mkdir(path, 0755); err != nil {
+		if !os.IsExist(err) {
+			return nil, err
+		}
+
+		status, err := s.status(path)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed reading status of resume write")
+		}
+
+		if ref != status.Ref {
+			// NOTE(stevvooe): This is fairly catastrophic. Either we have some
+			// layout corruption or a hash collision for the ref key.
+			return nil, errors.Wrapf(err, "ref key does not match: %v != %v", ref, status.Ref)
+		}
+
+		if total > 0 && status.Total > 0 && total != status.Total {
+			return nil, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
+		}
+
+		// slow slow slow!!, send to goroutine or use resumable hashes
+		fp, err := os.Open(data)
+		if err != nil {
+			return nil, err
+		}
+		defer fp.Close()
+
+		p := bufPool.Get().([]byte)
+		defer bufPool.Put(p)
+
+		offset, err = io.CopyBuffer(digester.Hash(), fp, p)
+		if err != nil {
+			return nil, err
+		}
+
+		updatedAt = status.UpdatedAt
+		startedAt = status.StartedAt
+		total = status.Total
+	} else {
+		// the ingest is new, we need to setup the target location.
+		// write the ref to a file for later use
+		if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil {
+			return nil, err
+		}
+
+		if total > 0 {
+			if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil {
+				return nil, err
+			}
+		}
+
+		startedAt = time.Now()
+		updatedAt = startedAt
+	}
+
+	fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to open data file")
+	}
+
+	return &writer{
+		s:         s,
+		fp:        fp,
+		ref:       ref,
+		path:      path,
+		offset:    offset,
+		total:     total,
+		digester:  digester,
+		startedAt: startedAt,
+		updatedAt: updatedAt,
+	}, nil
+}
+
+// Abort an active transaction keyed by ref. If the ingest is active, it will
+// be cancelled. Any resources associated with the ingest will be cleaned.
+func (s *store) Abort(ctx context.Context, ref string) error {
+	root := s.ingestRoot(ref)
+	if err := os.RemoveAll(root); err != nil {
+		if os.IsNotExist(err) {
+			return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref)
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+func (s *store) blobPath(dgst digest.Digest) string {
+	return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex())
+}
+
+func (s *store) ingestRoot(ref string) string {
+	dgst := digest.FromString(ref)
+	return filepath.Join(s.root, "ingest", dgst.Hex())
+}
+
+// ingestPaths returns the paths used during an ingest:
+//
+// - root: entire ingest directory
+// - ref: name of the starting ref, must be unique
+// - data: file where data is written
+//
+func (s *store) ingestPaths(ref string) (string, string, string) {
+	var (
+		fp = s.ingestRoot(ref)
+		rp = filepath.Join(fp, "ref")
+		dp = filepath.Join(fp, "data")
+	)
+
+	return fp, rp, dp
+}
+
+func readFileString(path string) (string, error) {
+	p, err := ioutil.ReadFile(path)
+	return string(p), err
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/store_unix.go b/vendor/github.com/containerd/containerd/content/local/store_unix.go
new file mode 100644
index 0000000..46eab02
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/store_unix.go
@@ -0,0 +1,20 @@
+// +build linux solaris darwin freebsd
+
+package local
+
+import (
+	"os"
+	"syscall"
+	"time"
+
+	"github.com/containerd/containerd/sys"
+)
+
+func getStartTime(fi os.FileInfo) time.Time {
+	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
+		return time.Unix(int64(sys.StatCtime(st).Sec),
+			int64(sys.StatCtime(st).Nsec))
+	}
+
+	return fi.ModTime()
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/store_windows.go b/vendor/github.com/containerd/containerd/content/local/store_windows.go
new file mode 100644
index 0000000..7fb6ad4
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/store_windows.go
@@ -0,0 +1,10 @@
+package local
+
+import (
+	"os"
+	"time"
+)
+
+func getStartTime(fi os.FileInfo) time.Time {
+	return fi.ModTime()
+}
diff --git a/vendor/github.com/containerd/containerd/content/local/writer.go b/vendor/github.com/containerd/containerd/content/local/writer.go
new file mode 100644
index 0000000..c4f1a94
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/content/local/writer.go
@@ -0,0 +1,157 @@
+package local
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"runtime"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+// writer represents a write transaction against the blob store.
+type writer struct {
+	s         *store
+	fp        *os.File // opened data file
+	path      string   // path to writer dir
+	ref       string   // ref key
+	offset    int64
+	total     int64
+	digester  digest.Digester
+	startedAt time.Time
+	updatedAt time.Time
+}
+
+func (w *writer) Status() (content.Status, error) {
+	return content.Status{
+		Ref:       w.ref,
+		Offset:    w.offset,
+		Total:     w.total,
+		StartedAt: w.startedAt,
+		UpdatedAt: w.updatedAt,
+	}, nil
+}
+
+// Digest returns the current digest of the content, up to the current write.
+//
+// Cannot be called concurrently with `Write`.
+func (w *writer) Digest() digest.Digest {
+	return w.digester.Digest()
+}
+
+// Write p to the transaction.
+//
+// Note that writes are unbuffered to the backing file. When writing, it is
+// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer.
+func (w *writer) Write(p []byte) (n int, err error) {
+	n, err = w.fp.Write(p)
+	w.digester.Hash().Write(p[:n])
+	w.offset += int64(len(p))
+	w.updatedAt = time.Now()
+	return n, err
+}
+
+func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	if w.fp == nil {
+		return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
+	}
+
+	if err := w.fp.Sync(); err != nil {
+		return errors.Wrap(err, "sync failed")
+	}
+
+	fi, err := w.fp.Stat()
+	if err != nil {
+		return errors.Wrap(err, "stat on ingest file failed")
+	}
+
+	// change to readonly, more important for read, but provides _some_
+	// protection from this point on. We use the existing perms with a mask
+	// only allowing reads honoring the umask on creation.
+	//
+	// This removes write and exec, only allowing read per the creation umask.
+	//
+	// NOTE: Windows does not support this operation
+	if runtime.GOOS != "windows" {
+		if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil {
+			return errors.Wrap(err, "failed to change ingest file permissions")
+		}
+	}
+
+	if size > 0 && size != fi.Size() {
+		return errors.Errorf("unexpected commit size %d, expected %d", fi.Size(), size)
+	}
+
+	if err := w.fp.Close(); err != nil {
+		return errors.Wrap(err, "failed closing ingest")
+	}
+
+	dgst := w.digester.Digest()
+	if expected != "" && expected != dgst {
+		return errors.Errorf("unexpected commit digest %s, expected %s", dgst, expected)
+	}
+
+	var (
+		ingest = filepath.Join(w.path, "data")
+		target = w.s.blobPath(dgst)
+	)
+
+	// make sure parent directories of blob exist
+	if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
+		return err
+	}
+
+	// clean up!!
+	defer os.RemoveAll(w.path)
+
+	if err := os.Rename(ingest, target); err != nil {
+		if os.IsExist(err) {
+			// collision with the target file!
+			return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst)
+		}
+		return err
+	}
+	commitTime := time.Now()
+	if err := os.Chtimes(target, commitTime, commitTime); err != nil {
+		return err
+	}
+
+	w.fp = nil
+	unlock(w.ref)
+
+	return nil
+}
+
+// Close the writer, flushing any unwritten data and leaving the progress
+// intact.
+//
+// If one needs to resume the transaction, a new writer can be obtained from
+// `Ingester.Writer` using the same key. The write can then be continued
+// from where it was left off.
+//
+// To abandon a transaction completely, first call close then `IngestManager.Abort` to
+// clean up the associated resources.
+func (w *writer) Close() (err error) {
+	if w.fp != nil {
+		w.fp.Sync()
+		err = w.fp.Close()
+		w.fp = nil
+		unlock(w.ref)
+		return
+	}
+
+	return nil
+}
+
+func (w *writer) Truncate(size int64) error {
+	if size != 0 {
+		return errors.New("Truncate: unsupported size")
+	}
+	w.offset = 0
+	w.digester.Hash().Reset()
+	return w.fp.Truncate(0)
+}
diff --git a/vendor/github.com/containerd/containerd/dialer.go b/vendor/github.com/containerd/containerd/dialer.go
new file mode 100644
index 0000000..c87cf12
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/dialer.go
@@ -0,0 +1,51 @@
+package containerd
+
+import (
+	"net"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+type dialResult struct {
+	c   net.Conn
+	err error
+}
+
+// Dialer returns a GRPC net.Conn connected to the provided address
+func Dialer(address string, timeout time.Duration) (net.Conn, error) {
+	var (
+		stopC = make(chan struct{})
+		synC  = make(chan *dialResult)
+	)
+	go func() {
+		defer close(synC)
+		for {
+			select {
+			case <-stopC:
+				return
+			default:
+				c, err := dialer(address, timeout)
+				if isNoent(err) {
+					<-time.After(10 * time.Millisecond)
+					continue
+				}
+				synC <- &dialResult{c, err}
+				return
+			}
+		}
+	}()
+	select {
+	case dr := <-synC:
+		return dr.c, dr.err
+	case <-time.After(timeout):
+		close(stopC)
+		go func() {
+			dr := <-synC
+			if dr != nil {
+				dr.c.Close()
+			}
+		}()
+		return nil, errors.Errorf("dial %s: timeout", address)
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/dialer_unix.go b/vendor/github.com/containerd/containerd/dialer_unix.go
new file mode 100644
index 0000000..2e97d17
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/dialer_unix.go
@@ -0,0 +1,36 @@
+// +build !windows
+
+package containerd
+
+import (
+	"fmt"
+	"net"
+	"os"
+	"strings"
+	"syscall"
+	"time"
+)
+
+func isNoent(err error) bool {
+	if err != nil {
+		if nerr, ok := err.(*net.OpError); ok {
+			if serr, ok := nerr.Err.(*os.SyscallError); ok {
+				if serr.Err == syscall.ENOENT {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+func dialer(address string, timeout time.Duration) (net.Conn, error) {
+	address = strings.TrimPrefix(address, "unix://")
+	return net.DialTimeout("unix", address, timeout)
+}
+
+// DialAddress returns the address with unix:// prepended to the
+// provided address
+func DialAddress(address string) string {
+	return fmt.Sprintf("unix://%s", address)
+}
diff --git a/vendor/github.com/containerd/containerd/dialer_windows.go b/vendor/github.com/containerd/containerd/dialer_windows.go
new file mode 100644
index 0000000..c91a326
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/dialer_windows.go
@@ -0,0 +1,29 @@
+package containerd
+
+import (
+	"net"
+	"os"
+	"syscall"
+	"time"
+
+	winio "github.com/Microsoft/go-winio"
+)
+
+func isNoent(err error) bool {
+	if err != nil {
+		if oerr, ok := err.(*os.PathError); ok {
+			if oerr.Err == syscall.ENOENT {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+func dialer(address string, timeout time.Duration) (net.Conn, error) {
+	return winio.DialPipe(address, &timeout)
+}
+
+func DialAddress(address string) string {
+	return address
+}
diff --git a/vendor/github.com/containerd/containerd/diff/diff.go b/vendor/github.com/containerd/containerd/diff/diff.go
new file mode 100644
index 0000000..85cef35
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/diff/diff.go
@@ -0,0 +1,69 @@
+package diff
+
+import (
+	"github.com/containerd/containerd/mount"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/net/context"
+)
+
+// Config is used to hold parameters needed for a diff operation
+type Config struct {
+	// MediaType is the type of diff to generate
+	// Default depends on the differ,
+	// i.e. application/vnd.oci.image.layer.v1.tar+gzip
+	MediaType string
+
+	// Reference is the content upload reference
+	// Default will use a random reference string
+	Reference string
+
+	// Labels are the labels to apply to the generated content
+	Labels map[string]string
+}
+
+// Opt is used to configure a diff operation
+type Opt func(*Config) error
+
+// Differ allows the apply and creation of filesystem diffs between mounts
+type Differ interface {
+	// Apply applies the content referred to by the given descriptor to
+	// the provided mount. The method of applying is based on the
+	// implementation and content descriptor. For example, in the common
+	// case the descriptor is a file system difference in tar format,
+	// that tar would be applied on top of the mounts.
+	Apply(ctx context.Context, desc ocispec.Descriptor, mount []mount.Mount) (ocispec.Descriptor, error)
+
+	// DiffMounts computes the difference between two mounts and returns a
+	// descriptor for the computed diff. The options can provide
+	// a ref which can be used to track the content creation of the diff.
+	// The media type which is used to determine the format of the created
+	// content can also be provided as an option.
+	DiffMounts(ctx context.Context, lower, upper []mount.Mount, opts ...Opt) (ocispec.Descriptor, error)
+}
+
+// WithMediaType sets the media type to use for creating the diff, without
+// specifying the differ will choose a default.
+func WithMediaType(m string) Opt {
+	return func(c *Config) error {
+		c.MediaType = m
+		return nil
+	}
+}
+
+// WithReference is used to set the content upload reference used by
+// the diff operation. This allows the caller to track the upload through
+// the content store.
+func WithReference(ref string) Opt {
+	return func(c *Config) error {
+		c.Reference = ref
+		return nil
+	}
+}
+
+// WithLabels is used to set content labels on the created diff content.
+func WithLabels(labels map[string]string) Opt {
+	return func(c *Config) error {
+		c.Labels = labels
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go
new file mode 100644
index 0000000..b4d6ea8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/errdefs/errors.go
@@ -0,0 +1,62 @@
+// Package errdefs defines the common errors used throughout containerd
+// packages.
+//
+// Use with errors.Wrap and error.Wrapf to add context to an error.
+//
+// To detect an error class, use the IsXXX functions to tell whether an error
+// is of a certain type.
+//
+// The functions ToGRPC and FromGRPC can be used to map server-side and
+// client-side errors to the correct types.
+package errdefs
+
+import "github.com/pkg/errors"
+
+// Definitions of common error types used throughout containerd. All containerd
+// errors returned by most packages will map into one of these errors classes.
+// Packages should return errors of these types when they want to instruct a
+// client to take a particular action.
+//
+// For the most part, we just try to provide local grpc errors. Most conditions
+// map very well to those defined by grpc.
+var (
+	ErrUnknown            = errors.New("unknown") // used internally to represent a missed mapping.
+	ErrInvalidArgument    = errors.New("invalid argument")
+	ErrNotFound           = errors.New("not found")
+	ErrAlreadyExists      = errors.New("already exists")
+	ErrFailedPrecondition = errors.New("failed precondition")
+	ErrUnavailable        = errors.New("unavailable")
+	ErrNotImplemented     = errors.New("not implemented") // represents not supported and unimplemented
+)
+
+// IsInvalidArgument returns true if the error is due to an invalid argument
+func IsInvalidArgument(err error) bool {
+	return errors.Cause(err) == ErrInvalidArgument
+}
+
+// IsNotFound returns true if the error is due to a missing object
+func IsNotFound(err error) bool {
+	return errors.Cause(err) == ErrNotFound
+}
+
+// IsAlreadyExists returns true if the error is due to an already existing
+// metadata item
+func IsAlreadyExists(err error) bool {
+	return errors.Cause(err) == ErrAlreadyExists
+}
+
+// IsFailedPrecondition returns true if an operation could not proceed due to the
+// lack of a particular condition
+func IsFailedPrecondition(err error) bool {
+	return errors.Cause(err) == ErrFailedPrecondition
+}
+
+// IsUnavailable returns true if the error is due to a resource being unavailable
+func IsUnavailable(err error) bool {
+	return errors.Cause(err) == ErrUnavailable
+}
+
+// IsNotImplemented returns true if the error is due to not being implemented
+func IsNotImplemented(err error) bool {
+	return errors.Cause(err) == ErrNotImplemented
+}
diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go
new file mode 100644
index 0000000..2aa2e11
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go
@@ -0,0 +1,109 @@
+package errdefs
+
+import (
+	"strings"
+
+	"github.com/pkg/errors"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// ToGRPC will attempt to map the backend containerd error into a grpc error,
+// using the original error message as a description.
+//
+// Further information may be extracted from certain errors depending on their
+// type.
+//
+// If the error is unmapped, the original error will be returned to be handled
+// by the regular grpc error handling stack.
+func ToGRPC(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	if isGRPCError(err) {
+		// error has already been mapped to grpc
+		return err
+	}
+
+	switch {
+	case IsInvalidArgument(err):
+		return status.Errorf(codes.InvalidArgument, err.Error())
+	case IsNotFound(err):
+		return status.Errorf(codes.NotFound, err.Error())
+	case IsAlreadyExists(err):
+		return status.Errorf(codes.AlreadyExists, err.Error())
+	case IsFailedPrecondition(err):
+		return status.Errorf(codes.FailedPrecondition, err.Error())
+	case IsUnavailable(err):
+		return status.Errorf(codes.Unavailable, err.Error())
+	case IsNotImplemented(err):
+		return status.Errorf(codes.Unimplemented, err.Error())
+	}
+
+	return err
+}
+
+// ToGRPCf maps the error to grpc error codes, assembling the formatting string
+// and combining it with the target error string.
+//
+// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
+func ToGRPCf(err error, format string, args ...interface{}) error {
+	return ToGRPC(errors.Wrapf(err, format, args...))
+}
+
+// FromGRPC returns the underlying error from a grpc service based on the grpc error code
+func FromGRPC(err error) error {
+	if err == nil {
+		return nil
+	}
+
+	var cls error // divide these into error classes, becomes the cause
+
+	switch grpc.Code(err) {
+	case codes.InvalidArgument:
+		cls = ErrInvalidArgument
+	case codes.AlreadyExists:
+		cls = ErrAlreadyExists
+	case codes.NotFound:
+		cls = ErrNotFound
+	case codes.Unavailable:
+		cls = ErrUnavailable
+	case codes.FailedPrecondition:
+		cls = ErrFailedPrecondition
+	case codes.Unimplemented:
+		cls = ErrNotImplemented
+	default:
+		cls = ErrUnknown
+	}
+
+	msg := rebaseMessage(cls, err)
+	if msg != "" {
+		err = errors.Wrapf(cls, msg)
+	} else {
+		err = errors.WithStack(cls)
+	}
+
+	return err
+}
+
+// rebaseMessage removes the repeats for an error at the end of an error
+// string. This will happen when taking an error over grpc then remapping it.
+//
+// Effectively, we just remove the string of cls from the end of err if it
+// appears there.
+func rebaseMessage(cls error, err error) string {
+	desc := grpc.ErrorDesc(err)
+	clss := cls.Error()
+	if desc == clss {
+		return ""
+	}
+
+	return strings.TrimSuffix(desc, ": "+clss)
+}
+
+func isGRPCError(err error) bool {
+	_, ok := status.FromError(err)
+	return ok
+}
diff --git a/vendor/github.com/containerd/containerd/events/events.go b/vendor/github.com/containerd/containerd/events/events.go
new file mode 100644
index 0000000..efe2f59
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/events/events.go
@@ -0,0 +1,31 @@
+package events
+
+import (
+	"context"
+
+	events "github.com/containerd/containerd/api/services/events/v1"
+)
+
+// Event is a generic interface for any type of event
+type Event interface{}
+
+// Publisher posts the event.
+type Publisher interface {
+	Publish(ctx context.Context, topic string, event Event) error
+}
+
+// Forwarder forwards an event to the underlying event bus
+type Forwarder interface {
+	Forward(ctx context.Context, envelope *events.Envelope) error
+}
+
+type publisherFunc func(ctx context.Context, topic string, event Event) error
+
+func (fn publisherFunc) Publish(ctx context.Context, topic string, event Event) error {
+	return fn(ctx, topic, event)
+}
+
+// Subscriber allows callers to subscribe to events
+type Subscriber interface {
+	Subscribe(ctx context.Context, filters ...string) (ch <-chan *events.Envelope, errs <-chan error)
+}
diff --git a/vendor/github.com/containerd/containerd/events/exchange.go b/vendor/github.com/containerd/containerd/events/exchange.go
new file mode 100644
index 0000000..eeeeea3
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/events/exchange.go
@@ -0,0 +1,231 @@
+package events
+
+import (
+	"context"
+	"strings"
+	"time"
+
+	events "github.com/containerd/containerd/api/services/events/v1"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/filters"
+	"github.com/containerd/containerd/identifiers"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/typeurl"
+	goevents "github.com/docker/go-events"
+	"github.com/gogo/protobuf/types"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// Exchange broadcasts events
+type Exchange struct {
+	broadcaster *goevents.Broadcaster
+}
+
+// NewExchange returns a new event Exchange
+func NewExchange() *Exchange {
+	return &Exchange{
+		broadcaster: goevents.NewBroadcaster(),
+	}
+}
+
+// Forward accepts an envelope to be directly distributed on the exchange.
+//
+// This is useful when an event is forwarded on behalf of another namespace or
+// when the event is propagated on behalf of another publisher.
+func (e *Exchange) Forward(ctx context.Context, envelope *events.Envelope) (err error) {
+	if err := validateEnvelope(envelope); err != nil {
+		return err
+	}
+
+	defer func() {
+		logger := log.G(ctx).WithFields(logrus.Fields{
+			"topic": envelope.Topic,
+			"ns":    envelope.Namespace,
+			"type":  envelope.Event.TypeUrl,
+		})
+
+		if err != nil {
+			logger.WithError(err).Error("error forwarding event")
+		} else {
+			logger.Debug("event forwarded")
+		}
+	}()
+
+	return e.broadcaster.Write(envelope)
+}
+
+// Publish packages and sends an event. The caller will be considered the
+// initial publisher of the event. This means the timestamp will be calculated
+// at this point and this method may read from the calling context.
+func (e *Exchange) Publish(ctx context.Context, topic string, event Event) (err error) {
+	var (
+		namespace string
+		encoded   *types.Any
+		envelope  events.Envelope
+	)
+
+	namespace, err = namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return errors.Wrapf(err, "failed publishing event")
+	}
+	if err := validateTopic(topic); err != nil {
+		return errors.Wrapf(err, "envelope topic %q", topic)
+	}
+
+	encoded, err = typeurl.MarshalAny(event)
+	if err != nil {
+		return err
+	}
+
+	envelope.Timestamp = time.Now().UTC()
+	envelope.Namespace = namespace
+	envelope.Topic = topic
+	envelope.Event = encoded
+
+	defer func() {
+		logger := log.G(ctx).WithFields(logrus.Fields{
+			"topic": envelope.Topic,
+			"ns":    envelope.Namespace,
+			"type":  envelope.Event.TypeUrl,
+		})
+
+		if err != nil {
+			logger.WithError(err).Error("error publishing event")
+		} else {
+			logger.Debug("event published")
+		}
+	}()
+
+	return e.broadcaster.Write(&envelope)
+}
+
+// Subscribe to events on the exchange. Events are sent through the returned
+// channel ch. If an error is encountered, it will be sent on channel errs and
+// errs will be closed. To end the subscription, cancel the provided context.
+//
+// Zero or more filters may be provided as strings. Only events that match
+// *any* of the provided filters will be sent on the channel. The filters use
+// the standard containerd filters package syntax.
+func (e *Exchange) Subscribe(ctx context.Context, fs ...string) (ch <-chan *events.Envelope, errs <-chan error) {
+	var (
+		evch                  = make(chan *events.Envelope)
+		errq                  = make(chan error, 1)
+		channel               = goevents.NewChannel(0)
+		queue                 = goevents.NewQueue(channel)
+		dst     goevents.Sink = queue
+	)
+
+	closeAll := func() {
+		defer close(errq)
+		defer e.broadcaster.Remove(dst)
+		defer queue.Close()
+		defer channel.Close()
+	}
+
+	ch = evch
+	errs = errq
+
+	if len(fs) > 0 {
+		filter, err := filters.ParseAll(fs...)
+		if err != nil {
+			errq <- errors.Wrapf(err, "failed parsing subscription filters")
+			closeAll()
+			return
+		}
+
+		dst = goevents.NewFilter(queue, goevents.MatcherFunc(func(gev goevents.Event) bool {
+			return filter.Match(adapt(gev))
+		}))
+	}
+
+	e.broadcaster.Add(dst)
+
+	go func() {
+		defer closeAll()
+
+		var err error
+	loop:
+		for {
+			select {
+			case ev := <-channel.C:
+				env, ok := ev.(*events.Envelope)
+				if !ok {
+					// TODO(stevvooe): For the most part, we are well protected
+					// from this condition. Both Forward and Publish protect
+					// from this.
+					err = errors.Errorf("invalid envelope encountered %#v; please file a bug", ev)
+					break
+				}
+
+				select {
+				case evch <- env:
+				case <-ctx.Done():
+					break loop
+				}
+			case <-ctx.Done():
+				break loop
+			}
+		}
+
+		if err == nil {
+			if cerr := ctx.Err(); cerr != context.Canceled {
+				err = cerr
+			}
+		}
+
+		errq <- err
+	}()
+
+	return
+}
+
+func validateTopic(topic string) error {
+	if topic == "" {
+		return errors.Wrap(errdefs.ErrInvalidArgument, "must not be empty")
+	}
+
+	if topic[0] != '/' {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "must start with '/'")
+	}
+
+	if len(topic) == 1 {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "must have at least one component")
+	}
+
+	components := strings.Split(topic[1:], "/")
+	for _, component := range components {
+		if err := identifiers.Validate(component); err != nil {
+			return errors.Wrapf(err, "failed validation on component %q", component)
+		}
+	}
+
+	return nil
+}
+
+func validateEnvelope(envelope *events.Envelope) error {
+	if err := namespaces.Validate(envelope.Namespace); err != nil {
+		return errors.Wrapf(err, "event envelope has invalid namespace")
+	}
+
+	if err := validateTopic(envelope.Topic); err != nil {
+		return errors.Wrapf(err, "envelope topic %q", envelope.Topic)
+	}
+
+	if envelope.Timestamp.IsZero() {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "timestamp must be set on forwarded event")
+	}
+
+	return nil
+}
+
+func adapt(ev interface{}) filters.Adaptor {
+	if adaptor, ok := ev.(filters.Adaptor); ok {
+		return adaptor
+	}
+
+	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+		return "", false
+	})
+}
diff --git a/vendor/github.com/containerd/containerd/export.go b/vendor/github.com/containerd/containerd/export.go
new file mode 100644
index 0000000..76bebe3
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/export.go
@@ -0,0 +1,189 @@
+package containerd
+
+import (
+	"archive/tar"
+	"context"
+	"encoding/json"
+	"io"
+	"sort"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	ocispecs "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+func (c *Client) exportToOCITar(ctx context.Context, desc ocispec.Descriptor, writer io.Writer, eopts exportOpts) error {
+	tw := tar.NewWriter(writer)
+	defer tw.Close()
+
+	records := []tarRecord{
+		ociLayoutFile(""),
+		ociIndexRecord(desc),
+	}
+
+	cs := c.ContentStore()
+	algorithms := map[string]struct{}{}
+	exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		records = append(records, blobRecord(cs, desc))
+		algorithms[desc.Digest.Algorithm().String()] = struct{}{}
+		return nil, nil
+	}
+
+	handlers := images.Handlers(
+		images.ChildrenHandler(cs, platforms.Default()),
+		images.HandlerFunc(exportHandler),
+	)
+
+	// Walk sequentially since the number of fetches is likely one and doing in
+	// parallel requires locking the export handler
+	if err := images.Walk(ctx, handlers, desc); err != nil {
+		return err
+	}
+
+	if len(algorithms) > 0 {
+		records = append(records, directoryRecord("blobs/", 0755))
+		for alg := range algorithms {
+			records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
+		}
+	}
+
+	return writeTar(ctx, tw, records)
+}
+
+type tarRecord struct {
+	Header *tar.Header
+	CopyTo func(context.Context, io.Writer) (int64, error)
+}
+
+func blobRecord(cs content.Store, desc ocispec.Descriptor) tarRecord {
+	path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex()
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     path,
+			Mode:     0444,
+			Size:     desc.Size,
+			Typeflag: tar.TypeReg,
+		},
+		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+			r, err := cs.ReaderAt(ctx, desc.Digest)
+			if err != nil {
+				return 0, err
+			}
+			defer r.Close()
+
+			// Verify digest
+			dgstr := desc.Digest.Algorithm().Digester()
+
+			n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
+			if err != nil {
+				return 0, err
+			}
+			if dgstr.Digest() != desc.Digest {
+				return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
+			}
+			return n, nil
+		},
+	}
+}
+
+func directoryRecord(name string, mode int64) tarRecord {
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     name,
+			Mode:     mode,
+			Typeflag: tar.TypeDir,
+		},
+	}
+}
+
+func ociLayoutFile(version string) tarRecord {
+	if version == "" {
+		version = ocispec.ImageLayoutVersion
+	}
+	layout := ocispec.ImageLayout{
+		Version: version,
+	}
+
+	b, err := json.Marshal(layout)
+	if err != nil {
+		panic(err)
+	}
+
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     ocispec.ImageLayoutFile,
+			Mode:     0444,
+			Size:     int64(len(b)),
+			Typeflag: tar.TypeReg,
+		},
+		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+			n, err := w.Write(b)
+			return int64(n), err
+		},
+	}
+
+}
+
+func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord {
+	index := ocispec.Index{
+		Versioned: ocispecs.Versioned{
+			SchemaVersion: 2,
+		},
+		Manifests: manifests,
+	}
+
+	b, err := json.Marshal(index)
+	if err != nil {
+		panic(err)
+	}
+
+	return tarRecord{
+		Header: &tar.Header{
+			Name:     "index.json",
+			Mode:     0644,
+			Size:     int64(len(b)),
+			Typeflag: tar.TypeReg,
+		},
+		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
+			n, err := w.Write(b)
+			return int64(n), err
+		},
+	}
+}
+
+func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
+	sort.Sort(tarRecordsByName(records))
+
+	for _, record := range records {
+		if err := tw.WriteHeader(record.Header); err != nil {
+			return err
+		}
+		if record.CopyTo != nil {
+			n, err := record.CopyTo(ctx, tw)
+			if err != nil {
+				return err
+			}
+			if n != record.Header.Size {
+				return errors.Errorf("unexpected copy size for %s", record.Header.Name)
+			}
+		} else if record.Header.Size > 0 {
+			return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
+		}
+	}
+	return nil
+}
+
+type tarRecordsByName []tarRecord
+
+func (t tarRecordsByName) Len() int {
+	return len(t)
+}
+func (t tarRecordsByName) Swap(i, j int) {
+	t[i], t[j] = t[j], t[i]
+}
+func (t tarRecordsByName) Less(i, j int) bool {
+	return t[i].Header.Name < t[j].Header.Name
+}
diff --git a/vendor/github.com/containerd/containerd/filters/adaptor.go b/vendor/github.com/containerd/containerd/filters/adaptor.go
new file mode 100644
index 0000000..5a5ac7e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/filters/adaptor.go
@@ -0,0 +1,17 @@
+package filters
+
+// Adaptor specifies the mapping of fieldpaths to a type. For the given field
+// path, the value and whether it is present should be returned. The mapping of
+// the fieldpath to a field is deferred to the adaptor implementation, but
+// should generally follow protobuf field path/mask semantics.
+type Adaptor interface {
+	Field(fieldpath []string) (value string, present bool)
+}
+
+// AdapterFunc allows implementation specific matching of fieldpaths
+type AdapterFunc func(fieldpath []string) (string, bool)
+
+// Field returns the field name and true if it exists
+func (fn AdapterFunc) Field(fieldpath []string) (string, bool) {
+	return fn(fieldpath)
+}
diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go
new file mode 100644
index 0000000..6217557
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/filters/filter.go
@@ -0,0 +1,163 @@
+// Package filters defines a syntax and parser that can be used for the
+// filtration of items across the containerd API. The core is built on the
+// concept of protobuf field paths, with quoting.  Several operators allow the
+// user to flexibly select items based on field presence, equality, inequality
+// and regular expressions. Flexible adaptors support working with any type.
+//
+// The syntax is fairly familiar, if you've used container ecosystem
+// projects.  At the core, we base it on the concept of protobuf field
+// paths, augmenting with the ability to quote portions of the field path
+// to match arbitrary labels. These "selectors" come in the following
+// syntax:
+//
+// ```
+// <fieldpath>[<operator><value>]
+// ```
+//
+// A basic example is as follows:
+//
+// ```
+// name==foo
+// ```
+//
+// This would match all objects that have a field `name` with the value
+// `foo`. If we only want to test if the field is present, we can omit the
+// operator. This is most useful for matching labels in containerd. The
+// following will match objects that have the field "labels" and have the
+// label "foo" defined:
+//
+// ```
+// labels.foo
+// ```
+//
+// We also allow for quoting of parts of the field path to allow matching
+// of arbitrary items:
+//
+// ```
+// labels."very complex label"==something
+// ```
+//
+// We also define `!=` and `~=` as operators. The `!=` will match all
+// objects that don't match the value for a field and `~=` will compile the
+// target value as a regular expression and match the field value against that.
+//
+// Selectors can be combined using a comma, such that the resulting
+// selector will require all selectors are matched for the object to match.
+// The following example will match objects that are named `foo` and have
+// the label `bar`:
+//
+// ```
+// name==foo,labels.bar
+// ```
+//
+package filters
+
+import (
+	"regexp"
+
+	"github.com/containerd/containerd/log"
+)
+
+// Filter matches specific resources based the provided filter
+type Filter interface {
+	Match(adaptor Adaptor) bool
+}
+
+// FilterFunc is a function that handles matching with an adaptor
+type FilterFunc func(Adaptor) bool
+
+// Match matches the FilterFunc returning true if the object matches the filter
+func (fn FilterFunc) Match(adaptor Adaptor) bool {
+	return fn(adaptor)
+}
+
+// Always is a filter that always returns true for any type of object
+var Always FilterFunc = func(adaptor Adaptor) bool {
+	return true
+}
+
+// Any allows multiple filters to be matched against the object
+type Any []Filter
+
+// Match returns true if any of the provided filters are true
+func (m Any) Match(adaptor Adaptor) bool {
+	for _, m := range m {
+		if m.Match(adaptor) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// All allows multiple filters to be matched against the object
+type All []Filter
+
+// Match only returns true if all filters match the object
+func (m All) Match(adaptor Adaptor) bool {
+	for _, m := range m {
+		if !m.Match(adaptor) {
+			return false
+		}
+	}
+
+	return true
+}
+
+type operator int
+
+const (
+	operatorPresent = iota
+	operatorEqual
+	operatorNotEqual
+	operatorMatches
+)
+
+func (op operator) String() string {
+	switch op {
+	case operatorPresent:
+		return "?"
+	case operatorEqual:
+		return "=="
+	case operatorNotEqual:
+		return "!="
+	case operatorMatches:
+		return "~="
+	}
+
+	return "unknown"
+}
+
+type selector struct {
+	fieldpath []string
+	operator  operator
+	value     string
+	re        *regexp.Regexp
+}
+
+func (m selector) Match(adaptor Adaptor) bool {
+	value, present := adaptor.Field(m.fieldpath)
+
+	switch m.operator {
+	case operatorPresent:
+		return present
+	case operatorEqual:
+		return present && value == m.value
+	case operatorNotEqual:
+		return value != m.value
+	case operatorMatches:
+		if m.re == nil {
+			r, err := regexp.Compile(m.value)
+			if err != nil {
+				log.L.Errorf("error compiling regexp %q", m.value)
+				return false
+			}
+
+			m.re = r
+		}
+
+		return m.re.MatchString(value)
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go
new file mode 100644
index 0000000..c9b0984
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/filters/parser.go
@@ -0,0 +1,262 @@
+package filters
+
+import (
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+)
+
+/*
+Parse the strings into a filter that may be used with an adaptor.
+
+The filter is made up of zero or more selectors.
+
+The format is a comma separated list of expressions, in the form of
+`<fieldpath><op><value>`, known as selectors. All selectors must match the
+target object for the filter to be true.
+
+We define the operators "==" for equality, "!=" for not equal and "~=" for a
+regular expression. If the operator and value are not present, the matcher will
+test for the presence of a value, as defined by the target object.
+
+The formal grammar is as follows:
+
+selectors := selector ("," selector)*
+selector  := fieldpath (operator value)
+fieldpath := field ('.' field)*
+field     := quoted | [A-Za-z] [A-Za-z0-9_]+
+operator  := "==" | "!=" | "~="
+value     := quoted | [^\s,]+
+quoted    := <go string syntax>
+
+*/
+func Parse(s string) (Filter, error) {
+	// special case empty to match all
+	if s == "" {
+		return Always, nil
+	}
+
+	p := parser{input: s}
+	return p.parse()
+}
+
+// ParseAll parses each filter in ss and returns a filter that will return true
+// if any filter matches the expression.
+//
+// If no filters are provided, the filter will match anything.
+func ParseAll(ss ...string) (Filter, error) {
+	if len(ss) == 0 {
+		return Always, nil
+	}
+
+	var fs []Filter
+	for _, s := range ss {
+		f, err := Parse(s)
+		if err != nil {
+			return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error())
+		}
+
+		fs = append(fs, f)
+	}
+
+	return Any(fs), nil
+}
+
+type parser struct {
+	input   string
+	scanner scanner
+}
+
+func (p *parser) parse() (Filter, error) {
+	p.scanner.init(p.input)
+
+	ss, err := p.selectors()
+	if err != nil {
+		return nil, errors.Wrap(err, "filters")
+	}
+
+	return ss, nil
+}
+
+func (p *parser) selectors() (Filter, error) {
+	s, err := p.selector()
+	if err != nil {
+		return nil, err
+	}
+
+	ss := All{s}
+
+loop:
+	for {
+		tok := p.scanner.peek()
+		switch tok {
+		case ',':
+			pos, tok, _ := p.scanner.scan()
+			if tok != tokenSeparator {
+				return nil, p.mkerr(pos, "expected a separator")
+			}
+
+			s, err := p.selector()
+			if err != nil {
+				return nil, err
+			}
+
+			ss = append(ss, s)
+		case tokenEOF:
+			break loop
+		default:
+			return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok))
+		}
+	}
+
+	return ss, nil
+}
+
+func (p *parser) selector() (selector, error) {
+	fieldpath, err := p.fieldpath()
+	if err != nil {
+		return selector{}, err
+	}
+
+	switch p.scanner.peek() {
+	case ',', tokenSeparator, tokenEOF:
+		return selector{
+			fieldpath: fieldpath,
+			operator:  operatorPresent,
+		}, nil
+	}
+
+	op, err := p.operator()
+	if err != nil {
+		return selector{}, err
+	}
+
+	value, err := p.value()
+	if err != nil {
+		if err == io.EOF {
+			return selector{}, io.ErrUnexpectedEOF
+		}
+		return selector{}, err
+	}
+
+	return selector{
+		fieldpath: fieldpath,
+		value:     value,
+		operator:  op,
+	}, nil
+}
+
+func (p *parser) fieldpath() ([]string, error) {
+	f, err := p.field()
+	if err != nil {
+		return nil, err
+	}
+
+	fs := []string{f}
+loop:
+	for {
+		tok := p.scanner.peek() // lookahead to consume field separator
+
+		switch tok {
+		case '.':
+			pos, tok, _ := p.scanner.scan() // consume separator
+			if tok != tokenSeparator {
+				return nil, p.mkerr(pos, "expected a field separator (`.`)")
+			}
+
+			f, err := p.field()
+			if err != nil {
+				return nil, err
+			}
+
+			fs = append(fs, f)
+		default:
+			// let the layer above handle the other bad cases.
+			break loop
+		}
+	}
+
+	return fs, nil
+}
+
+func (p *parser) field() (string, error) {
+	pos, tok, s := p.scanner.scan()
+	switch tok {
+	case tokenField:
+		return s, nil
+	case tokenQuoted:
+		return p.unquote(pos, s)
+	}
+
+	return "", p.mkerr(pos, "expected field or quoted")
+}
+
+func (p *parser) operator() (operator, error) {
+	pos, tok, s := p.scanner.scan()
+	switch tok {
+	case tokenOperator:
+		switch s {
+		case "==":
+			return operatorEqual, nil
+		case "!=":
+			return operatorNotEqual, nil
+		case "~=":
+			return operatorMatches, nil
+		default:
+			return 0, p.mkerr(pos, "unsupported operator %q", s)
+		}
+	}
+
+	return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`)
+}
+
+func (p *parser) value() (string, error) {
+	pos, tok, s := p.scanner.scan()
+
+	switch tok {
+	case tokenValue, tokenField:
+		return s, nil
+	case tokenQuoted:
+		return p.unquote(pos, s)
+	}
+
+	return "", p.mkerr(pos, "expected value or quoted")
+}
+
+func (p *parser) unquote(pos int, s string) (string, error) {
+	uq, err := strconv.Unquote(s)
+	if err != nil {
+		return "", p.mkerr(pos, "unquoting failed: %v", err)
+	}
+
+	return uq, nil
+}
+
+type parseError struct {
+	input string
+	pos   int
+	msg   string
+}
+
+func (pe parseError) Error() string {
+	if pe.pos < len(pe.input) {
+		before := pe.input[:pe.pos]
+		location := pe.input[pe.pos : pe.pos+1] // need to handle end
+		after := pe.input[pe.pos+1:]
+
+		return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg)
+	}
+
+	return fmt.Sprintf("[%s]: %v", pe.input, pe.msg)
+}
+
+func (p *parser) mkerr(pos int, format string, args ...interface{}) error {
+	return errors.Wrap(parseError{
+		input: p.input,
+		pos:   pos,
+		msg:   fmt.Sprintf(format, args...),
+	}, "parse error")
+}
diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go
new file mode 100644
index 0000000..5a55e0a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/filters/scanner.go
@@ -0,0 +1,267 @@
+package filters
+
+import (
+	"fmt"
+	"unicode"
+	"unicode/utf8"
+)
+
+const (
+	tokenEOF = -(iota + 1)
+	tokenQuoted
+	tokenValue
+	tokenField
+	tokenSeparator
+	tokenOperator
+	tokenIllegal
+)
+
+type token rune
+
+func (t token) String() string {
+	switch t {
+	case tokenEOF:
+		return "EOF"
+	case tokenQuoted:
+		return "Quoted"
+	case tokenValue:
+		return "Value"
+	case tokenField:
+		return "Field"
+	case tokenSeparator:
+		return "Separator"
+	case tokenOperator:
+		return "Operator"
+	case tokenIllegal:
+		return "Illegal"
+	}
+
+	return string(t)
+}
+
+func (t token) GoString() string {
+	return "token" + t.String()
+}
+
+type scanner struct {
+	input string
+	pos   int
+	ppos  int // bounds the current rune in the string
+	value bool
+}
+
+func (s *scanner) init(input string) {
+	s.input = input
+	s.pos = 0
+	s.ppos = 0
+}
+
+func (s *scanner) next() rune {
+	if s.pos >= len(s.input) {
+		return tokenEOF
+	}
+	s.pos = s.ppos
+
+	r, w := utf8.DecodeRuneInString(s.input[s.ppos:])
+	s.ppos += w
+	if r == utf8.RuneError {
+		if w > 0 {
+			return tokenIllegal
+		}
+		return tokenEOF
+	}
+
+	if r == 0 {
+		return tokenIllegal
+	}
+
+	return r
+}
+
+func (s *scanner) peek() rune {
+	pos := s.pos
+	ppos := s.ppos
+	ch := s.next()
+	s.pos = pos
+	s.ppos = ppos
+	return ch
+}
+
+func (s *scanner) scan() (int, token, string) {
+	var (
+		ch  = s.next()
+		pos = s.pos
+	)
+
+chomp:
+	switch {
+	case ch == tokenEOF:
+	case ch == tokenIllegal:
+	case isQuoteRune(ch):
+		s.scanQuoted(ch)
+		return pos, tokenQuoted, s.input[pos:s.ppos]
+	case isSeparatorRune(ch):
+		return pos, tokenSeparator, s.input[pos:s.ppos]
+	case isOperatorRune(ch):
+		s.scanOperator()
+		s.value = true
+		return pos, tokenOperator, s.input[pos:s.ppos]
+	case unicode.IsSpace(ch):
+		// chomp
+		ch = s.next()
+		pos = s.pos
+		goto chomp
+	case s.value:
+		s.scanValue()
+		s.value = false
+		return pos, tokenValue, s.input[pos:s.ppos]
+	case isFieldRune(ch):
+		s.scanField()
+		return pos, tokenField, s.input[pos:s.ppos]
+	}
+
+	return s.pos, token(ch), ""
+}
+
+func (s *scanner) scanField() {
+	for {
+		ch := s.peek()
+		if !isFieldRune(ch) {
+			break
+		}
+		s.next()
+	}
+}
+
+func (s *scanner) scanOperator() {
+	for {
+		ch := s.peek()
+		switch ch {
+		case '=', '!', '~':
+			s.next()
+		default:
+			return
+		}
+	}
+}
+
+func (s *scanner) scanValue() {
+	for {
+		ch := s.peek()
+		if !isValueRune(ch) {
+			break
+		}
+		s.next()
+	}
+}
+
+func (s *scanner) scanQuoted(quote rune) {
+	ch := s.next() // read character after quote
+	for ch != quote {
+		if ch == '\n' || ch < 0 {
+			s.error("literal not terminated")
+			return
+		}
+		if ch == '\\' {
+			ch = s.scanEscape(quote)
+		} else {
+			ch = s.next()
+		}
+	}
+	return
+}
+
+func (s *scanner) scanEscape(quote rune) rune {
+	ch := s.next() // read character after '\\'
+	switch ch {
+	case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote:
+		// nothing to do
+		ch = s.next()
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		ch = s.scanDigits(ch, 8, 3)
+	case 'x':
+		ch = s.scanDigits(s.next(), 16, 2)
+	case 'u':
+		ch = s.scanDigits(s.next(), 16, 4)
+	case 'U':
+		ch = s.scanDigits(s.next(), 16, 8)
+	default:
+		s.error("illegal char escape")
+	}
+	return ch
+}
+
+func (s *scanner) scanDigits(ch rune, base, n int) rune {
+	for n > 0 && digitVal(ch) < base {
+		ch = s.next()
+		n--
+	}
+	if n > 0 {
+		s.error("illegal char escape")
+	}
+	return ch
+}
+
+func (s *scanner) error(msg string) {
+	fmt.Println("error fixme", msg)
+}
+
+func digitVal(ch rune) int {
+	switch {
+	case '0' <= ch && ch <= '9':
+		return int(ch - '0')
+	case 'a' <= ch && ch <= 'f':
+		return int(ch - 'a' + 10)
+	case 'A' <= ch && ch <= 'F':
+		return int(ch - 'A' + 10)
+	}
+	return 16 // larger than any legal digit val
+}
+
+func isFieldRune(r rune) bool {
+	return (r == '_' || isAlphaRune(r) || isDigitRune(r))
+}
+
+func isAlphaRune(r rune) bool {
+	return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z'
+}
+
+func isDigitRune(r rune) bool {
+	return r >= '0' && r <= '9'
+}
+
+func isOperatorRune(r rune) bool {
+	switch r {
+	case '=', '!', '~':
+		return true
+	}
+
+	return false
+}
+
+func isQuoteRune(r rune) bool {
+	switch r {
+	case '"': // maybe add single quoting?
+		return true
+	}
+
+	return false
+}
+
+func isSeparatorRune(r rune) bool {
+	switch r {
+	case ',', '.':
+		return true
+	}
+
+	return false
+}
+
+func isValueRune(r rune) bool {
+	return r != ',' && !unicode.IsSpace(r) &&
+		(unicode.IsLetter(r) ||
+			unicode.IsDigit(r) ||
+			unicode.IsNumber(r) ||
+			unicode.IsGraphic(r) ||
+			unicode.IsPunct(r))
+}
diff --git a/vendor/github.com/containerd/containerd/fs/copy.go b/vendor/github.com/containerd/containerd/fs/copy.go
new file mode 100644
index 0000000..0d11fa5
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/copy.go
@@ -0,0 +1,120 @@
+package fs
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+
+	"github.com/pkg/errors"
+)
+
+var (
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return make([]byte, 32*1024)
+		},
+	}
+)
+
+// CopyDir copies the directory from src to dst.
+// Most efficient copy of files is attempted.
+func CopyDir(dst, src string) error {
+	inodes := map[uint64]string{}
+	return copyDirectory(dst, src, inodes)
+}
+
+func copyDirectory(dst, src string, inodes map[uint64]string) error {
+	stat, err := os.Stat(src)
+	if err != nil {
+		return errors.Wrapf(err, "failed to stat %s", src)
+	}
+	if !stat.IsDir() {
+		return errors.Errorf("source is not directory")
+	}
+
+	if st, err := os.Stat(dst); err != nil {
+		if err := os.Mkdir(dst, stat.Mode()); err != nil {
+			return errors.Wrapf(err, "failed to mkdir %s", dst)
+		}
+	} else if !st.IsDir() {
+		return errors.Errorf("cannot copy to non-directory: %s", dst)
+	} else {
+		if err := os.Chmod(dst, stat.Mode()); err != nil {
+			return errors.Wrapf(err, "failed to chmod on %s", dst)
+		}
+	}
+
+	fis, err := ioutil.ReadDir(src)
+	if err != nil {
+		return errors.Wrapf(err, "failed to read %s", src)
+	}
+
+	if err := copyFileInfo(stat, dst); err != nil {
+		return errors.Wrapf(err, "failed to copy file info for %s", dst)
+	}
+
+	for _, fi := range fis {
+		source := filepath.Join(src, fi.Name())
+		target := filepath.Join(dst, fi.Name())
+
+		switch {
+		case fi.IsDir():
+			if err := copyDirectory(target, source, inodes); err != nil {
+				return err
+			}
+			continue
+		case (fi.Mode() & os.ModeType) == 0:
+			link, err := getLinkSource(target, fi, inodes)
+			if err != nil {
+				return errors.Wrap(err, "failed to get hardlink")
+			}
+			if link != "" {
+				if err := os.Link(link, target); err != nil {
+					return errors.Wrap(err, "failed to create hard link")
+				}
+			} else if err := copyFile(source, target); err != nil {
+				return errors.Wrap(err, "failed to copy files")
+			}
+		case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink:
+			link, err := os.Readlink(source)
+			if err != nil {
+				return errors.Wrapf(err, "failed to read link: %s", source)
+			}
+			if err := os.Symlink(link, target); err != nil {
+				return errors.Wrapf(err, "failed to create symlink: %s", target)
+			}
+		case (fi.Mode() & os.ModeDevice) == os.ModeDevice:
+			if err := copyDevice(target, fi); err != nil {
+				return errors.Wrapf(err, "failed to create device")
+			}
+		default:
+			// TODO: Support pipes and sockets
+			return errors.Wrapf(err, "unsupported mode %s", fi.Mode())
+		}
+		if err := copyFileInfo(fi, target); err != nil {
+			return errors.Wrap(err, "failed to copy file info")
+		}
+
+		if err := copyXAttrs(target, source); err != nil {
+			return errors.Wrap(err, "failed to copy xattrs")
+		}
+	}
+
+	return nil
+}
+
+func copyFile(source, target string) error {
+	src, err := os.Open(source)
+	if err != nil {
+		return errors.Wrapf(err, "failed to open source %s", source)
+	}
+	defer src.Close()
+	tgt, err := os.Create(target)
+	if err != nil {
+		return errors.Wrapf(err, "failed to open target %s", target)
+	}
+	defer tgt.Close()
+
+	return copyFileContent(tgt, src)
+}
diff --git a/vendor/github.com/containerd/containerd/fs/copy_linux.go b/vendor/github.com/containerd/containerd/fs/copy_linux.go
new file mode 100644
index 0000000..efe4753
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/copy_linux.go
@@ -0,0 +1,83 @@
+package fs
+
+import (
+	"io"
+	"os"
+	"syscall"
+
+	"github.com/containerd/containerd/sys"
+	"github.com/containerd/continuity/sysx"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+func copyFileInfo(fi os.FileInfo, name string) error {
+	st := fi.Sys().(*syscall.Stat_t)
+	if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
+		return errors.Wrapf(err, "failed to chown %s", name)
+	}
+
+	if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
+		if err := os.Chmod(name, fi.Mode()); err != nil {
+			return errors.Wrapf(err, "failed to chmod %s", name)
+		}
+	}
+
+	timespec := []unix.Timespec{unix.Timespec(sys.StatAtime(st)), unix.Timespec(sys.StatMtime(st))}
+	if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
+		return errors.Wrapf(err, "failed to utime %s", name)
+	}
+
+	return nil
+}
+
+func copyFileContent(dst, src *os.File) error {
+	st, err := src.Stat()
+	if err != nil {
+		return errors.Wrap(err, "unable to stat source")
+	}
+
+	n, err := unix.CopyFileRange(int(src.Fd()), nil, int(dst.Fd()), nil, int(st.Size()), 0)
+	if err != nil {
+		if err != unix.ENOSYS && err != unix.EXDEV {
+			return errors.Wrap(err, "copy file range failed")
+		}
+
+		buf := bufferPool.Get().([]byte)
+		_, err = io.CopyBuffer(dst, src, buf)
+		bufferPool.Put(buf)
+		return err
+	}
+
+	if int64(n) != st.Size() {
+		return errors.Wrapf(err, "short copy: %d of %d", int64(n), st.Size())
+	}
+
+	return nil
+}
+
+func copyXAttrs(dst, src string) error {
+	xattrKeys, err := sysx.LListxattr(src)
+	if err != nil {
+		return errors.Wrapf(err, "failed to list xattrs on %s", src)
+	}
+	for _, xattr := range xattrKeys {
+		data, err := sysx.LGetxattr(src, xattr)
+		if err != nil {
+			return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
+		}
+		if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
+			return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
+		}
+	}
+
+	return nil
+}
+
+func copyDevice(dst string, fi os.FileInfo) error {
+	st, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return errors.New("unsupported stat type")
+	}
+	return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
+}
diff --git a/vendor/github.com/containerd/containerd/fs/copy_unix.go b/vendor/github.com/containerd/containerd/fs/copy_unix.go
new file mode 100644
index 0000000..6234f3d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/copy_unix.go
@@ -0,0 +1,68 @@
+// +build solaris darwin freebsd
+
+package fs
+
+import (
+	"io"
+	"os"
+	"syscall"
+
+	"github.com/containerd/containerd/sys"
+	"github.com/containerd/continuity/sysx"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+// copyFileInfo applies ownership, permission bits and timestamps from fi
+// to the file at name.
+func copyFileInfo(fi os.FileInfo, name string) error {
+	st := fi.Sys().(*syscall.Stat_t) // panics on non-unix FileInfo; callers pass lstat results
+	if err := os.Lchown(name, int(st.Uid), int(st.Gid)); err != nil {
+		return errors.Wrapf(err, "failed to chown %s", name)
+	}
+
+	// Chmod follows symlinks, so skip it for links to avoid changing the
+	// target's mode.
+	if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
+		if err := os.Chmod(name, fi.Mode()); err != nil {
+			return errors.Wrapf(err, "failed to chmod %s", name)
+		}
+	}
+
+	// NOTE(review): UtimesNano follows symlinks here, unlike the Linux
+	// variant which passes AT_SYMLINK_NOFOLLOW — confirm this is intended
+	// on these platforms.
+	timespec := []syscall.Timespec{sys.StatAtime(st), sys.StatMtime(st)}
+	if err := syscall.UtimesNano(name, timespec); err != nil {
+		return errors.Wrapf(err, "failed to utime %s", name)
+	}
+
+	return nil
+}
+
+// copyFileContent copies file data with a pooled buffer to limit
+// allocations; these platforms have no copy_file_range fast path.
+func copyFileContent(dst, src *os.File) error {
+	buf := bufferPool.Get().([]byte)
+	_, err := io.CopyBuffer(dst, src, buf)
+	bufferPool.Put(buf)
+
+	return err
+}
+
+// copyXAttrs copies every extended attribute from src to dst. The L*
+// variants operate on the path itself without following symlinks.
+func copyXAttrs(dst, src string) error {
+	xattrKeys, err := sysx.LListxattr(src)
+	if err != nil {
+		return errors.Wrapf(err, "failed to list xattrs on %s", src)
+	}
+	for _, xattr := range xattrKeys {
+		data, err := sysx.LGetxattr(src, xattr)
+		if err != nil {
+			return errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)
+		}
+		if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
+			return errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)
+		}
+	}
+
+	return nil
+}
+
+// copyDevice recreates the device node described by fi at dst, with the
+// same mode bits and device number. fi must wrap a *syscall.Stat_t.
+func copyDevice(dst string, fi os.FileInfo) error {
+	st, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return errors.New("unsupported stat type")
+	}
+	return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
+}
diff --git a/vendor/github.com/containerd/containerd/fs/copy_windows.go b/vendor/github.com/containerd/containerd/fs/copy_windows.go
new file mode 100644
index 0000000..fb4933c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/copy_windows.go
@@ -0,0 +1,33 @@
+package fs
+
+import (
+	"io"
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+// copyFileInfo applies permission bits from fi to name. Ownership and
+// timestamps are not yet handled on Windows.
+func copyFileInfo(fi os.FileInfo, name string) error {
+	if err := os.Chmod(name, fi.Mode()); err != nil {
+		return errors.Wrapf(err, "failed to chmod %s", name)
+	}
+
+	// TODO: copy windows specific metadata
+
+	return nil
+}
+
+// copyFileContent copies file data with a pooled buffer to limit
+// allocations.
+func copyFileContent(dst, src *os.File) error {
+	buf := bufferPool.Get().([]byte)
+	_, err := io.CopyBuffer(dst, src, buf)
+	bufferPool.Put(buf)
+	return err
+}
+
+// copyXAttrs is a no-op on Windows; extended attributes are not copied.
+func copyXAttrs(dst, src string) error {
+	return nil
+}
+
+// copyDevice is unsupported on Windows.
+func copyDevice(dst string, fi os.FileInfo) error {
+	return errors.New("device copy not supported")
+}
diff --git a/vendor/github.com/containerd/containerd/fs/diff.go b/vendor/github.com/containerd/containerd/fs/diff.go
new file mode 100644
index 0000000..9073d0d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/diff.go
@@ -0,0 +1,310 @@
+package fs
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"golang.org/x/sync/errgroup"
+
+	"github.com/sirupsen/logrus"
+)
+
+// ChangeKind is the type of modification that
+// a change is making.
+type ChangeKind int
+
+const (
+	// ChangeKindUnmodified represents an unmodified
+	// file
+	//
+	// Typing the first constant makes the whole iota group ChangeKind
+	// rather than untyped int, so misuse is caught at compile time.
+	ChangeKindUnmodified ChangeKind = iota
+
+	// ChangeKindAdd represents an addition of
+	// a file
+	ChangeKindAdd
+
+	// ChangeKindModify represents a change to
+	// an existing file
+	ChangeKindModify
+
+	// ChangeKindDelete represents a delete of
+	// a file
+	ChangeKindDelete
+)
+
+// String returns a human-readable name for the change kind, or the
+// empty string for unrecognized values.
+func (k ChangeKind) String() string {
+	switch k {
+	case ChangeKindUnmodified:
+		return "unmodified"
+	case ChangeKindAdd:
+		return "add"
+	case ChangeKindModify:
+		return "modify"
+	case ChangeKindDelete:
+		return "delete"
+	default:
+		return ""
+	}
+}
+
+// Change represents single change between a diff and its parent.
+type Change struct {
+	Kind ChangeKind
+	Path string // path rooted at string(os.PathSeparator), relative to the walked dir
+}
+
+// ChangeFunc is the type of function called for each change
+// computed during a directory changes calculation.
+type ChangeFunc func(ChangeKind, string, os.FileInfo, error) error
+
+// Changes computes changes between two directories calling the
+// given change function for each computed change. The first
+// directory is intended to the base directory and second
+// directory the changed directory.
+//
+// The change callback is called by the order of path names and
+// should be appliable in that order.
+//  Due to this apply ordering, the following is true
+//  - Removed directory trees only create a single change for the root
+//    directory removed. Remaining changes are implied.
+//  - A directory which is modified to become a file will not have
+//    delete entries for sub-path items, their removal is implied
+//    by the removal of the parent directory.
+//
+// Opaque directories will not be treated specially and each file
+// removed from the base directory will show up as a removal.
+//
+// File content comparisons will be done on files which have timestamps
+// which may have been truncated. If either of the files being compared
+// has a zero value nanosecond value, each byte will be compared for
+// differences. If 2 files have the same seconds value but different
+// nanosecond values where one of those values is zero, the files will
+// be considered unchanged if the content is the same. This behavior
+// is to account for timestamp truncation during archiving.
+func Changes(ctx context.Context, a, b string, changeFn ChangeFunc) error {
+	if a == "" {
+		// No base directory: every entry in b is an addition.
+		logrus.Debugf("Using single walk diff for %s", b)
+		return addDirChanges(ctx, changeFn, b)
+	} else if diffOptions := detectDirDiff(b, a); diffOptions != nil {
+		// b is a direct diff directory of a (e.g. an overlay upper dir);
+		// a single walk of the diff dir is sufficient.
+		logrus.Debugf("Using single walk diff for %s from %s", diffOptions.diffDir, a)
+		return diffDirChanges(ctx, changeFn, a, diffOptions)
+	}
+
+	// General case: walk both trees in lockstep.
+	logrus.Debugf("Using double walk diff for %s from %s", b, a)
+	return doubleWalkDiff(ctx, changeFn, a, b)
+}
+
+// addDirChanges reports every entry under root (except root itself) as
+// a ChangeKindAdd change, used when there is no base to diff against.
+func addDirChanges(ctx context.Context, changeFn ChangeFunc, root string) error {
+	return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		path, err = filepath.Rel(root, path)
+		if err != nil {
+			return err
+		}
+
+		path = filepath.Join(string(os.PathSeparator), path)
+
+		// Skip root
+		if path == string(os.PathSeparator) {
+			return nil
+		}
+
+		return changeFn(ChangeKindAdd, path, f, nil)
+	})
+}
+
+// diffDirOptions is used when the diff can be directly calculated from
+// a diff directory to its base, without walking both trees.
+type diffDirOptions struct {
+	diffDir      string                                      // directory containing only the changed files
+	skipChange   func(string) (bool, error)                  // optional; returns true to skip a path (e.g. AUFS metadata)
+	deleteChange func(string, string, os.FileInfo) (string, error) // maps a whiteout entry to the deleted path; "" if not a delete
+}
+
+// diffDirChanges walks the diff directory and compares changes against the base.
+// NOTE(review): o.deleteChange is called unconditionally below, so it is
+// assumed non-nil for any options produced by detectDirDiff — confirm.
+func diffDirChanges(ctx context.Context, changeFn ChangeFunc, base string, o *diffDirOptions) error {
+	changedDirs := make(map[string]struct{})
+	return filepath.Walk(o.diffDir, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		path, err = filepath.Rel(o.diffDir, path)
+		if err != nil {
+			return err
+		}
+
+		path = filepath.Join(string(os.PathSeparator), path)
+
+		// Skip root
+		if path == string(os.PathSeparator) {
+			return nil
+		}
+
+		// TODO: handle opaqueness, start new double walker at this
+		// location to get deletes, and skip tree in single walker
+
+		if o.skipChange != nil {
+			if skip, err := o.skipChange(path); skip {
+				return err
+			}
+		}
+
+		var kind ChangeKind
+
+		deletedFile, err := o.deleteChange(o.diffDir, path, f)
+		if err != nil {
+			return err
+		}
+
+		// Find out what kind of modification happened
+		if deletedFile != "" {
+			path = deletedFile
+			kind = ChangeKindDelete
+			f = nil
+		} else {
+			// Otherwise, the file was added
+			kind = ChangeKindAdd
+
+			// ...Unless it already existed in a base, in which case, it's a modification
+			stat, err := os.Stat(filepath.Join(base, path))
+			if err != nil && !os.IsNotExist(err) {
+				return err
+			}
+			if err == nil {
+				// The file existed in the base, so that's a modification
+
+				// However, if it's a directory, maybe it wasn't actually modified.
+				// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
+				if stat.IsDir() && f.IsDir() {
+					if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
+						// Both directories are the same, don't record the change
+						return nil
+					}
+				}
+				kind = ChangeKindModify
+			}
+		}
+
+		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
+		// This block is here to ensure the change is recorded even if the
+		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
+		// Check https://github.com/docker/docker/pull/13590 for details.
+		if f.IsDir() {
+			changedDirs[path] = struct{}{}
+		}
+		if kind == ChangeKindAdd || kind == ChangeKindDelete {
+			parent := filepath.Dir(path)
+			if _, ok := changedDirs[parent]; !ok && parent != "/" {
+				pi, err := os.Stat(filepath.Join(o.diffDir, parent))
+				if err := changeFn(ChangeKindModify, parent, pi, err); err != nil {
+					return err
+				}
+				changedDirs[parent] = struct{}{}
+			}
+		}
+
+		return changeFn(kind, path, f, nil)
+	})
+}
+
+// doubleWalkDiff walks both directories to create a diff
+//
+// Two goroutines walk a and b concurrently, streaming entries in sorted
+// path order over c1/c2; a third goroutine merges the two streams,
+// classifying each path as add/delete/modify. rmdir tracks the most
+// recently deleted directory so that entries underneath it are elided
+// (their removal is implied by the parent's delete).
+func doubleWalkDiff(ctx context.Context, changeFn ChangeFunc, a, b string) (err error) {
+	g, ctx := errgroup.WithContext(ctx)
+
+	var (
+		c1 = make(chan *currentPath)
+		c2 = make(chan *currentPath)
+
+		f1, f2 *currentPath
+		rmdir  string
+	)
+	g.Go(func() error {
+		defer close(c1)
+		return pathWalk(ctx, a, c1)
+	})
+	g.Go(func() error {
+		defer close(c2)
+		return pathWalk(ctx, b, c2)
+	})
+	g.Go(func() error {
+		for c1 != nil || c2 != nil {
+			// Refill whichever side was consumed on the previous
+			// iteration; a nil channel marks an exhausted walker.
+			if f1 == nil && c1 != nil {
+				f1, err = nextPath(ctx, c1)
+				if err != nil {
+					return err
+				}
+				if f1 == nil {
+					c1 = nil
+				}
+			}
+
+			if f2 == nil && c2 != nil {
+				f2, err = nextPath(ctx, c2)
+				if err != nil {
+					return err
+				}
+				if f2 == nil {
+					c2 = nil
+				}
+			}
+			if f1 == nil && f2 == nil {
+				continue
+			}
+
+			var f os.FileInfo
+			k, p := pathChange(f1, f2)
+			switch k {
+			case ChangeKindAdd:
+				if rmdir != "" {
+					rmdir = ""
+				}
+				f = f2.f
+				f2 = nil
+			case ChangeKindDelete:
+				// Check if this file is already removed by being
+				// under of a removed directory
+				if rmdir != "" && strings.HasPrefix(f1.path, rmdir) {
+					f1 = nil
+					continue
+				} else if rmdir == "" && f1.f.IsDir() {
+					rmdir = f1.path + string(os.PathSeparator)
+				} else if rmdir != "" {
+					rmdir = ""
+				}
+				f1 = nil
+			case ChangeKindModify:
+				same, err := sameFile(f1, f2)
+				if err != nil {
+					return err
+				}
+				if f1.f.IsDir() && !f2.f.IsDir() {
+					// Directory replaced by a file: children are
+					// implicitly deleted under rmdir.
+					rmdir = f1.path + string(os.PathSeparator)
+				} else if rmdir != "" {
+					rmdir = ""
+				}
+				f = f2.f
+				f1 = nil
+				f2 = nil
+				if same {
+					if !isLinked(f) {
+						continue
+					}
+					// Hardlinked files are reported even when unchanged.
+					k = ChangeKindUnmodified
+				}
+			}
+			if err := changeFn(k, p, f, nil); err != nil {
+				return err
+			}
+		}
+		return nil
+	})
+
+	return g.Wait()
+}
diff --git a/vendor/github.com/containerd/containerd/fs/diff_unix.go b/vendor/github.com/containerd/containerd/fs/diff_unix.go
new file mode 100644
index 0000000..36a0f3f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/diff_unix.go
@@ -0,0 +1,102 @@
+// +build !windows
+
+package fs
+
+import (
+	"bytes"
+	"os"
+	"path/filepath"
+	"strings"
+	"syscall"
+
+	"github.com/containerd/continuity/sysx"
+	"github.com/pkg/errors"
+)
+
+// whiteouts are files with a special meaning for the layered filesystem.
+// Docker uses AUFS whiteout files inside exported archives. In other
+// filesystems these files are generated/handled on tar creation/extraction.
+
+// whiteoutPrefix prefix means file is a whiteout. If this is followed by a
+// filename this means that file has been removed from the base layer.
+const whiteoutPrefix = ".wh."
+
+// whiteoutMetaPrefix prefix means whiteout has a special meaning and is not
+// for removing an actual file. Normally these files are excluded from exported
+// archives.
+const whiteoutMetaPrefix = whiteoutPrefix + whiteoutPrefix
+
+// whiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
+// layers. Normally these should not go into exported archives and all changed
+// hardlinks should be copied to the top layer.
+const whiteoutLinkDir = whiteoutMetaPrefix + "plnk"
+
+// whiteoutOpaqueDir file means directory has been made opaque - meaning
+// readdir calls to this directory do not follow to lower layers.
+const whiteoutOpaqueDir = whiteoutMetaPrefix + ".opq"
+
+// detectDirDiff returns diff dir options if a directory could
+// be found in the mount info for upper which is the direct
+// diff with the provided lower directory
+//
+// Currently always returns nil, so Changes always falls back to the
+// double-walk diff on these platforms.
+func detectDirDiff(upper, lower string) *diffDirOptions {
+	// TODO: get mount options for upper
+	// TODO: detect AUFS
+	// TODO: detect overlay
+	return nil
+}
+
+// aufsMetadataSkip reports whether path is an AUFS metadata entry
+// (".wh..wh.*" at the root) that should be excluded from the diff.
+// A pattern error conservatively skips the path and propagates the error.
+func aufsMetadataSkip(path string) (skip bool, err error) {
+	skip, err = filepath.Match(string(os.PathSeparator)+whiteoutMetaPrefix+"*", path)
+	if err != nil {
+		skip = true
+	}
+	return
+}
+
+// aufsDeletedFile returns the path of the file deleted by an AUFS
+// whiteout entry, or "" if path is not a whiteout. The root and fi
+// arguments are unused here but satisfy the deleteChange signature.
+func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
+	f := filepath.Base(path)
+
+	// If there is a whiteout, then the file was removed
+	if strings.HasPrefix(f, whiteoutPrefix) {
+		originalFile := f[len(whiteoutPrefix):]
+		return filepath.Join(filepath.Dir(path), originalFile), nil
+	}
+
+	return "", nil
+}
+
+// compareSysStat returns whether the two underlying unix stats are
+// equivalent (same mode, uid, gid and rdev), and an error. Values that
+// are not *syscall.Stat_t compare as not equivalent. (The signature has
+// no separate "same file" result; identity is checked elsewhere via
+// os.SameFile.)
+func compareSysStat(s1, s2 interface{}) (bool, error) {
+	ls1, ok := s1.(*syscall.Stat_t)
+	if !ok {
+		return false, nil
+	}
+	ls2, ok := s2.(*syscall.Stat_t)
+	if !ok {
+		return false, nil
+	}
+
+	return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil
+}
+
+// compareCapabilities reports whether p1 and p2 carry identical
+// "security.capability" xattrs. A missing attribute (ENODATA) is
+// treated as an empty value rather than an error.
+func compareCapabilities(p1, p2 string) (bool, error) {
+	c1, err := sysx.LGetxattr(p1, "security.capability")
+	if err != nil && err != sysx.ENODATA {
+		return false, errors.Wrapf(err, "failed to get xattr for %s", p1)
+	}
+	c2, err := sysx.LGetxattr(p2, "security.capability")
+	if err != nil && err != sysx.ENODATA {
+		return false, errors.Wrapf(err, "failed to get xattr for %s", p2)
+	}
+	return bytes.Equal(c1, c2), nil
+}
+
+// isLinked reports whether f is a hard-linked non-directory (link
+// count > 1). Returns false for non-unix FileInfo values.
+func isLinked(f os.FileInfo) bool {
+	s, ok := f.Sys().(*syscall.Stat_t)
+	if !ok {
+		return false
+	}
+	return !f.IsDir() && s.Nlink > 1
+}
diff --git a/vendor/github.com/containerd/containerd/fs/diff_windows.go b/vendor/github.com/containerd/containerd/fs/diff_windows.go
new file mode 100644
index 0000000..7bbd662
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/diff_windows.go
@@ -0,0 +1,21 @@
+package fs
+
+import "os"
+
+// detectDirDiff always returns nil on Windows; the double-walk diff is
+// always used.
+func detectDirDiff(upper, lower string) *diffDirOptions {
+	return nil
+}
+
+// compareSysStat is not implemented for Windows stat types yet; files
+// never compare equal via sys stat alone.
+func compareSysStat(s1, s2 interface{}) (bool, error) {
+	// TODO: Use windows specific sys type
+	return false, nil
+}
+
+// compareCapabilities always reports equal on Windows (no equivalent of
+// the unix security.capability xattr is compared yet).
+func compareCapabilities(p1, p2 string) (bool, error) {
+	// TODO: Use windows equivalent
+	return true, nil
+}
+
+// isLinked always reports false on Windows.
+func isLinked(os.FileInfo) bool {
+	return false
+}
diff --git a/vendor/github.com/containerd/containerd/fs/dtype_linux.go b/vendor/github.com/containerd/containerd/fs/dtype_linux.go
new file mode 100644
index 0000000..cc06573
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/dtype_linux.go
@@ -0,0 +1,87 @@
+// +build linux
+
+package fs
+
+import (
+	"fmt"
+	"io/ioutil"
+	"os"
+	"syscall"
+	"unsafe"
+)
+
+// locateDummyIfEmpty creates a temporary dummy file in path if the
+// directory is empty, so that a subsequent readdir yields at least one
+// dirent. It returns the dummy's name ("" if the directory was not
+// empty); the caller is responsible for removing it.
+func locateDummyIfEmpty(path string) (string, error) {
+	children, err := ioutil.ReadDir(path)
+	if err != nil {
+		return "", err
+	}
+	if len(children) != 0 {
+		return "", nil
+	}
+	dummyFile, err := ioutil.TempFile(path, "fsutils-dummy")
+	if err != nil {
+		return "", err
+	}
+	name := dummyFile.Name()
+	err = dummyFile.Close()
+	return name, err
+}
+
+// SupportsDType returns whether the filesystem mounted on path supports d_type
+// (i.e. readdir reports entry types instead of DT_UNKNOWN, which overlay
+// and similar drivers require).
+func SupportsDType(path string) (bool, error) {
+	// locate dummy so that we have at least one dirent
+	dummy, err := locateDummyIfEmpty(path)
+	if err != nil {
+		return false, err
+	}
+	if dummy != "" {
+		defer os.Remove(dummy)
+	}
+
+	visited := 0
+	supportsDType := true
+	fn := func(ent *syscall.Dirent) bool {
+		visited++
+		if ent.Type == syscall.DT_UNKNOWN {
+			supportsDType = false
+			// stop iteration
+			return true
+		}
+		// continue iteration
+		return false
+	}
+	if err = iterateReadDir(path, fn); err != nil {
+		return false, err
+	}
+	if visited == 0 {
+		// Should not happen: locateDummyIfEmpty guarantees an entry.
+		return false, fmt.Errorf("did not hit any dirent during iteration %s", path)
+	}
+	return supportsDType, nil
+}
+
+// iterateReadDir reads raw dirents from path via ReadDirent and invokes
+// fn for each; fn returning true stops the iteration early.
+func iterateReadDir(path string, fn func(*syscall.Dirent) bool) error {
+	d, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	defer d.Close()
+	fd := int(d.Fd())
+	buf := make([]byte, 4096)
+	for {
+		nbytes, err := syscall.ReadDirent(fd, buf)
+		if err != nil {
+			return err
+		}
+		if nbytes == 0 {
+			break
+		}
+		for off := 0; off < nbytes; {
+			// Reinterpret the buffer at off as a dirent header.
+			// NOTE(review): assumes the kernel never returns Reclen == 0,
+			// which would loop forever — confirm acceptable.
+			ent := (*syscall.Dirent)(unsafe.Pointer(&buf[off]))
+			if stop := fn(ent); stop {
+				return nil
+			}
+			off += int(ent.Reclen)
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/fs/du.go b/vendor/github.com/containerd/containerd/fs/du.go
new file mode 100644
index 0000000..61f439d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/du.go
@@ -0,0 +1,13 @@
+package fs
+
+// Usage of disk information
+type Usage struct {
+	Inodes int64 // number of distinct inodes (0 on platforms without inode tracking)
+	Size   int64 // total bytes, counting hardlinked content once where supported
+}
+
+// DiskUsage counts the number of inodes and disk usage for the resources under
+// path.
+func DiskUsage(roots ...string) (Usage, error) {
+	return diskUsage(roots...)
+}
diff --git a/vendor/github.com/containerd/containerd/fs/du_unix.go b/vendor/github.com/containerd/containerd/fs/du_unix.go
new file mode 100644
index 0000000..d8654d3
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/du_unix.go
@@ -0,0 +1,47 @@
+// +build !windows
+
+package fs
+
+import (
+	"os"
+	"path/filepath"
+	"syscall"
+)
+
+// diskUsage walks each root, deduplicating by (device, inode) so that
+// hardlinked files are counted only once across all roots.
+func diskUsage(roots ...string) (Usage, error) {
+	type inode struct {
+		// TODO(stevvooe): Can probably reduce memory usage by not tracking
+		// device, but we can leave this right for now.
+		dev, ino uint64
+	}
+
+	var (
+		size   int64
+		inodes = map[inode]struct{}{} // expensive!
+	)
+
+	for _, root := range roots {
+		if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+
+			// Unix walk entries always carry a *syscall.Stat_t.
+			stat := fi.Sys().(*syscall.Stat_t)
+
+			inoKey := inode{dev: uint64(stat.Dev), ino: uint64(stat.Ino)}
+			if _, ok := inodes[inoKey]; !ok {
+				inodes[inoKey] = struct{}{}
+				size += fi.Size()
+			}
+
+			return nil
+		}); err != nil {
+			return Usage{}, err
+		}
+	}
+
+	return Usage{
+		Inodes: int64(len(inodes)),
+		Size:   size,
+	}, nil
+}
diff --git a/vendor/github.com/containerd/containerd/fs/du_windows.go b/vendor/github.com/containerd/containerd/fs/du_windows.go
new file mode 100644
index 0000000..4a0363c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/du_windows.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package fs
+
+import (
+	"os"
+	"path/filepath"
+)
+
+// diskUsage sums file sizes under each root. Inodes are not tracked on
+// Windows, so hardlinked content may be counted multiple times.
+func diskUsage(roots ...string) (Usage, error) {
+	var (
+		size int64
+	)
+
+	// TODO(stevvooe): Support inodes (or equivalent) for windows.
+
+	for _, root := range roots {
+		if err := filepath.Walk(root, func(path string, fi os.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+
+			size += fi.Size()
+			return nil
+		}); err != nil {
+			return Usage{}, err
+		}
+	}
+
+	return Usage{
+		Size: size,
+	}, nil
+}
diff --git a/vendor/github.com/containerd/containerd/fs/hardlink.go b/vendor/github.com/containerd/containerd/fs/hardlink.go
new file mode 100644
index 0000000..38da938
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/hardlink.go
@@ -0,0 +1,27 @@
+package fs
+
+import "os"
+
+// GetLinkInfo returns an identifier for the node a hardlink points to
+// (the inode on unix, 0 on Windows) and whether the file is hard linked
+// (a non-directory with link count > 1). Note the identifier is returned
+// even when the boolean is false; callers must check the boolean.
+func GetLinkInfo(fi os.FileInfo) (uint64, bool) {
+	return getLinkInfo(fi)
+}
+
+// getLinkSource returns a path for the given name and
+// file info to its link source in the provided inode
+// map. If the given file name is not in the map and
+// has other links, it is added to the inode map
+// to be a source for other link locations.
+func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) {
+	inode, isHardlink := getLinkInfo(fi)
+	if !isHardlink {
+		return "", nil
+	}
+
+	// First sighting of this inode becomes the canonical source;
+	// later sightings return it so callers can link instead of copy.
+	path, ok := inodes[inode]
+	if !ok {
+		inodes[inode] = name
+	}
+	return path, nil
+}
diff --git a/vendor/github.com/containerd/containerd/fs/hardlink_unix.go b/vendor/github.com/containerd/containerd/fs/hardlink_unix.go
new file mode 100644
index 0000000..3b825c9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/hardlink_unix.go
@@ -0,0 +1,17 @@
+// +build !windows
+
+package fs
+
+import (
+	"os"
+	"syscall"
+)
+
+// getLinkInfo returns the file's inode and whether it is a hard-linked
+// non-directory. Non-unix FileInfo values yield (0, false).
+func getLinkInfo(fi os.FileInfo) (uint64, bool) {
+	s, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, false
+	}
+
+	return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1
+}
diff --git a/vendor/github.com/containerd/containerd/fs/hardlink_windows.go b/vendor/github.com/containerd/containerd/fs/hardlink_windows.go
new file mode 100644
index 0000000..ad8845a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/hardlink_windows.go
@@ -0,0 +1,7 @@
+package fs
+
+import "os"
+
+// getLinkInfo is a stub on Windows: hardlink detection is unsupported.
+func getLinkInfo(fi os.FileInfo) (uint64, bool) {
+	return 0, false
+}
diff --git a/vendor/github.com/containerd/containerd/fs/path.go b/vendor/github.com/containerd/containerd/fs/path.go
new file mode 100644
index 0000000..644b1ee
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/path.go
@@ -0,0 +1,261 @@
+package fs
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+var (
+	// errTooManyLinks is returned when symlink resolution exceeds the
+	// 255-link budget in walkLink (cycle protection).
+	errTooManyLinks = errors.New("too many links")
+)
+
+// currentPath is one entry emitted by pathWalk.
+type currentPath struct {
+	path     string      // path rooted at string(os.PathSeparator), relative to the walk root
+	f        os.FileInfo // lstat-style info from filepath.Walk
+	fullPath string      // root-joined absolute path on disk
+}
+
+// pathChange classifies the relationship between the current lower and
+// upper walk entries, relying on both walkers emitting paths in sorted
+// order. Panics if both are nil.
+func pathChange(lower, upper *currentPath) (ChangeKind, string) {
+	if lower == nil {
+		if upper == nil {
+			panic("cannot compare nil paths")
+		}
+		return ChangeKindAdd, upper.path
+	}
+	if upper == nil {
+		return ChangeKindDelete, lower.path
+	}
+	// TODO: compare by directory
+
+	switch i := strings.Compare(lower.path, upper.path); {
+	case i < 0:
+		// File in lower that is not in upper
+		return ChangeKindDelete, lower.path
+	case i > 0:
+		// File in upper that is not in lower
+		return ChangeKindAdd, upper.path
+	default:
+		return ChangeKindModify, upper.path
+	}
+}
+
+// sameFile reports whether f1 and f2 are unchanged: identical node,
+// or equivalent stat, capabilities, size and mtime — falling back to a
+// byte-wise content comparison when a truncated (zero-nanosecond)
+// timestamp makes mtime comparison unreliable.
+func sameFile(f1, f2 *currentPath) (bool, error) {
+	if os.SameFile(f1.f, f2.f) {
+		return true, nil
+	}
+
+	equalStat, err := compareSysStat(f1.f.Sys(), f2.f.Sys())
+	if err != nil || !equalStat {
+		return equalStat, err
+	}
+
+	if eq, err := compareCapabilities(f1.fullPath, f2.fullPath); err != nil || !eq {
+		return eq, err
+	}
+
+	// If not a directory also check size, modtime, and content
+	if !f1.f.IsDir() {
+		if f1.f.Size() != f2.f.Size() {
+			return false, nil
+		}
+		t1 := f1.f.ModTime()
+		t2 := f2.f.ModTime()
+
+		if t1.Unix() != t2.Unix() {
+			return false, nil
+		}
+
+		// If the timestamp may have been truncated in one of the
+		// files, check content of file to determine difference
+		if t1.Nanosecond() == 0 || t2.Nanosecond() == 0 {
+			if f1.f.Size() > 0 {
+				eq, err := compareFileContent(f1.fullPath, f2.fullPath)
+				if err != nil || !eq {
+					return eq, err
+				}
+			}
+		} else if t1.Nanosecond() != t2.Nanosecond() {
+			return false, nil
+		}
+	}
+
+	return true, nil
+}
+
+// compareChunkSize is the read granularity for byte-wise file
+// comparison. (Renamed from the typo "compareChuckSize"; the constant is
+// package-private and only used below.)
+const compareChunkSize = 32 * 1024
+
+// compareFileContent compares the content of 2 same sized files
+// by comparing each byte. Callers (sameFile) have already verified the
+// sizes match, so differing read lengths indicate a difference.
+func compareFileContent(p1, p2 string) (bool, error) {
+	f1, err := os.Open(p1)
+	if err != nil {
+		return false, err
+	}
+	defer f1.Close()
+	f2, err := os.Open(p2)
+	if err != nil {
+		return false, err
+	}
+	defer f2.Close()
+
+	b1 := make([]byte, compareChunkSize)
+	b2 := make([]byte, compareChunkSize)
+	for {
+		n1, err1 := f1.Read(b1)
+		if err1 != nil && err1 != io.EOF {
+			return false, err1
+		}
+		n2, err2 := f2.Read(b2)
+		if err2 != nil && err2 != io.EOF {
+			return false, err2
+		}
+		if n1 != n2 || !bytes.Equal(b1[:n1], b2[:n2]) {
+			return false, nil
+		}
+		if err1 == io.EOF && err2 == io.EOF {
+			return true, nil
+		}
+	}
+}
+
+// pathWalk walks root and sends each entry (except root itself) to
+// pathC in filepath.Walk's sorted order, aborting when ctx is canceled.
+// The caller is expected to close pathC after pathWalk returns.
+func pathWalk(ctx context.Context, root string, pathC chan<- *currentPath) error {
+	return filepath.Walk(root, func(path string, f os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+
+		// Rebase path
+		path, err = filepath.Rel(root, path)
+		if err != nil {
+			return err
+		}
+
+		path = filepath.Join(string(os.PathSeparator), path)
+
+		// Skip root
+		if path == string(os.PathSeparator) {
+			return nil
+		}
+
+		p := &currentPath{
+			path:     path,
+			f:        f,
+			fullPath: filepath.Join(root, path),
+		}
+
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case pathC <- p:
+			return nil
+		}
+	})
+}
+
+// nextPath receives the next entry from pathC, honoring cancellation.
+// It returns nil (no error) when the channel is closed.
+func nextPath(ctx context.Context, pathC <-chan *currentPath) (*currentPath, error) {
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case p := <-pathC:
+		return p, nil
+	}
+}
+
+// RootPath joins a path with a root, evaluating and bounding any
+// symlink to the root directory.
+func RootPath(root, path string) (string, error) {
+	if path == "" {
+		return root, nil
+	}
+	var linksWalked int // to protect against cycles
+	for {
+		i := linksWalked
+		newpath, err := walkLinks(root, path, &linksWalked)
+		if err != nil {
+			return "", err
+		}
+		path = newpath
+		// If no link was followed in this pass and the path is stable,
+		// resolution has converged.
+		if i == linksWalked {
+			newpath = filepath.Join("/", newpath)
+			if path == newpath {
+				return filepath.Join(root, newpath), nil
+			}
+			path = newpath
+		}
+	}
+}
+
+// walkLink resolves a single path relative to root. It returns the
+// (possibly rewritten) path, whether it was a symlink, and an error.
+// Absolute symlink targets that already live inside root are rebased to
+// be root-relative so resolution stays bounded by root.
+func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) {
+	if *linksWalked > 255 {
+		// Protect against symlink cycles and pathological chains.
+		return "", false, errTooManyLinks
+	}
+
+	path = filepath.Join("/", path)
+	if path == "/" {
+		return path, false, nil
+	}
+	realPath := filepath.Join(root, path)
+
+	fi, err := os.Lstat(realPath)
+	if err != nil {
+		// If path does not yet exist, treat as non-symlink
+		if os.IsNotExist(err) {
+			return path, false, nil
+		}
+		return "", false, err
+	}
+	if fi.Mode()&os.ModeSymlink == 0 {
+		return path, false, nil
+	}
+	newpath, err = os.Readlink(realPath)
+	if err != nil {
+		return "", false, err
+	}
+	if filepath.IsAbs(newpath) && strings.HasPrefix(newpath, root) {
+		// Strip the root prefix to rebase the target inside root.
+		// (Previously newpath[:len(root)] kept the prefix and discarded
+		// the remainder, resolving such links to root itself rather
+		// than the intended target.)
+		newpath = newpath[len(root):]
+		if !strings.HasPrefix(newpath, "/") {
+			newpath = "/" + newpath
+		}
+	}
+	*linksWalked++
+	return newpath, true, nil
+}
+
+// walkLinks recursively resolves each component of path (relative to
+// root) through walkLink, mirroring the stdlib filepath.walkLinks shape:
+// resolve the directory first, then the final component.
+func walkLinks(root, path string, linksWalked *int) (string, error) {
+	switch dir, file := filepath.Split(path); {
+	case dir == "":
+		// Single component.
+		newpath, _, err := walkLink(root, file, linksWalked)
+		return newpath, err
+	case file == "":
+		// Trailing separator: resolve the directory itself.
+		if os.IsPathSeparator(dir[len(dir)-1]) {
+			if dir == "/" {
+				return dir, nil
+			}
+			return walkLinks(root, dir[:len(dir)-1], linksWalked)
+		}
+		newpath, _, err := walkLink(root, dir, linksWalked)
+		return newpath, err
+	default:
+		newdir, err := walkLinks(root, dir, linksWalked)
+		if err != nil {
+			return "", err
+		}
+		newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked)
+		if err != nil {
+			return "", err
+		}
+		if !islink {
+			return newpath, nil
+		}
+		if filepath.IsAbs(newpath) {
+			return newpath, nil
+		}
+		// Relative link targets are interpreted against the resolved dir.
+		return filepath.Join(newdir, newpath), nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/fs/time.go b/vendor/github.com/containerd/containerd/fs/time.go
new file mode 100644
index 0000000..c336f4d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/fs/time.go
@@ -0,0 +1,13 @@
+package fs
+
+import "time"
+
+// sameFsTime reports whether two timestamps should be considered equal
+// for diffing purposes. GNU tar and the Go tar writer don't have
+// sub-second mtime precision, which is problematic when we apply changes
+// via tar files; we handle this by comparing for exact times, *or* the
+// same second count when either a or b has exactly 0 nanoseconds
+// (indicating possible truncation).
+func sameFsTime(a, b time.Time) bool {
+	return a == b ||
+		(a.Unix() == b.Unix() &&
+			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
+}
diff --git a/vendor/github.com/containerd/containerd/gc/gc.go b/vendor/github.com/containerd/containerd/gc/gc.go
new file mode 100644
index 0000000..e892be1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/gc/gc.go
@@ -0,0 +1,160 @@
+// Package gc experiments with providing central gc tooling to ensure
+// deterministic resource removal within containerd.
+//
+// For now, we just have a single exported implementation that can be used
+// under certain use cases.
+package gc
+
+import (
+	"context"
+	"sync"
+)
+
+// ResourceType represents the type of resource at a node.
+type ResourceType uint8
+
+// Node presents a resource which has a type and key,
+// this node can be used to lookup other nodes.
+type Node struct {
+	Type      ResourceType
+	Namespace string
+	Key       string
+}
+
+// Tricolor implements basic, single-thread tri-color GC. Given the roots, the
+// complete set and a refs function, this function returns a map of all
+// reachable objects.
+//
+// Correct usage requires that the caller not allow the arguments to change
+// until the result is used to delete objects in the system.
+//
+// It will allocate memory proportional to the size of the reachable set.
+//
+// We can probably use this to inform a design for incremental GC by injecting
+// callbacks to the set modification algorithms.
+func Tricolor(roots []Node, refs func(ref Node) ([]Node, error)) (map[Node]struct{}, error) {
+	var (
+		grays     []Node                // maintain a gray "stack"
+		seen      = map[Node]struct{}{} // or not "white", basically "seen"
+		reachable = map[Node]struct{}{} // or "black", in tri-color parlance
+	)
+
+	grays = append(grays, roots...)
+
+	for len(grays) > 0 {
+		// Pick any gray object
+		id := grays[len(grays)-1] // effectively "depth first" because first element
+		grays = grays[:len(grays)-1]
+		seen[id] = struct{}{} // post-mark this as not-white
+		rs, err := refs(id)
+		if err != nil {
+			return nil, err
+		}
+
+		// mark all the referenced objects as gray
+		for _, target := range rs {
+			if _, ok := seen[target]; !ok {
+				grays = append(grays, target)
+			}
+		}
+
+		// mark as black when done
+		reachable[id] = struct{}{}
+	}
+
+	return reachable, nil
+}
+
+// ConcurrentMark implements simple, concurrent GC. All the roots are scanned
+// and the complete set of references is formed by calling the refs function
+// for each seen object. This function returns a map of all object reachable
+// from a root.
+//
+// Correct usage requires that the caller not allow the arguments to change
+// until the result is used to delete objects in the system.
+//
+// It will allocate memory proportional to the size of the reachable set.
+func ConcurrentMark(ctx context.Context, root <-chan Node, refs func(context.Context, Node, func(Node)) error) (map[Node]struct{}, error) {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	var (
+		grays = make(chan Node)
+		seen  = map[Node]struct{}{} // or not "white", basically "seen"
+		wg    sync.WaitGroup
+
+		errOnce sync.Once
+		refErr  error
+	)
+
+	// Single consumer goroutine: only this goroutine touches seen, so no
+	// locking is needed. wg counts in-flight nodes; a wg.Add(1) precedes
+	// every send into grays, and each node is Done'd either here (dupe)
+	// or by its refs worker.
+	go func() {
+		for gray := range grays {
+			if _, ok := seen[gray]; ok {
+				wg.Done()
+				continue
+			}
+			seen[gray] = struct{}{} // post-mark this as non-white
+
+			go func(gray Node) {
+				defer wg.Done()
+
+				send := func(n Node) {
+					wg.Add(1)
+					select {
+					case grays <- n:
+					case <-ctx.Done():
+						wg.Done()
+					}
+				}
+
+				if err := refs(ctx, gray, send); err != nil {
+					errOnce.Do(func() {
+						refErr = err
+						cancel()
+					})
+				}
+
+			}(gray)
+		}
+	}()
+
+	for r := range root {
+		wg.Add(1)
+		select {
+		case grays <- r:
+		case <-ctx.Done():
+			wg.Done()
+		}
+
+	}
+
+	// Wait for outstanding grays to be processed
+	wg.Wait()
+
+	close(grays)
+
+	if refErr != nil {
+		return nil, refErr
+	}
+	if cErr := ctx.Err(); cErr != nil {
+		return nil, cErr
+	}
+
+	return seen, nil
+}
+
+// Sweep removes all nodes returned through the channel which are not in
+// the reachable set by calling the provided remove function. It stops at
+// the first removal error, leaving remaining nodes unswept.
+func Sweep(reachable map[Node]struct{}, all <-chan Node, remove func(Node) error) error {
+	// All black objects are now reachable, and all white objects are
+	// unreachable. Free those that are white!
+	for node := range all {
+		if _, ok := reachable[node]; !ok {
+			if err := remove(node); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/grpc.go b/vendor/github.com/containerd/containerd/grpc.go
new file mode 100644
index 0000000..c56eead
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/grpc.go
@@ -0,0 +1,35 @@
+package containerd
+
+import (
+	"github.com/containerd/containerd/namespaces"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+type namespaceInterceptor struct {
+	namespace string
+}
+
+func (ni namespaceInterceptor) unary(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+	_, ok := namespaces.Namespace(ctx)
+	if !ok {
+		ctx = namespaces.WithNamespace(ctx, ni.namespace)
+	}
+	return invoker(ctx, method, req, reply, cc, opts...)
+}
+
+func (ni namespaceInterceptor) stream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
+	_, ok := namespaces.Namespace(ctx)
+	if !ok {
+		ctx = namespaces.WithNamespace(ctx, ni.namespace)
+	}
+
+	return streamer(ctx, desc, cc, method, opts...)
+}
+
+func newNSInterceptors(ns string) (grpc.UnaryClientInterceptor, grpc.StreamClientInterceptor) {
+	ni := namespaceInterceptor{
+		namespace: ns,
+	}
+	return grpc.UnaryClientInterceptor(ni.unary), grpc.StreamClientInterceptor(ni.stream)
+}
diff --git a/vendor/github.com/containerd/containerd/identifiers/validate.go b/vendor/github.com/containerd/containerd/identifiers/validate.go
new file mode 100644
index 0000000..37f9b72
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/identifiers/validate.go
@@ -0,0 +1,57 @@
+// Package identifiers provides common validation for identifiers and keys
+// across containerd.
+//
+// Identifiers in containerd must be alphanumeric, allowing limited
+// underscores, dashes and dots.
+//
+// While the character set may be expanded in the future, identifiers
+// are guaranteed to be safely used as filesystem path components.
+package identifiers
+
+import (
+	"regexp"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+)
+
+const (
+	maxLength  = 76
+	alphanum   = `[A-Za-z0-9]+`
+	separators = `[._-]`
+)
+
+var (
+	// identifierRe defines the pattern for valid identifiers.
+	identifierRe = regexp.MustCompile(reAnchor(alphanum + reGroup(separators+reGroup(alphanum)) + "*"))
+)
+
+// Validate returns nil if the string s is a valid identifier.
+//
+// identifiers must be valid domain names according to RFC 1035, section 2.3.1.  To
+// enforce case insensitivity, all characters must be lower case.
+//
+// In general, identifiers that pass this validation, should be safe for use as
+// a domain name or filesystem path component.
+func Validate(s string) error {
+	if len(s) == 0 {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier must not be empty")
+	}
+
+	if len(s) > maxLength {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q greater than maximum length (%d characters)", s, maxLength)
+	}
+
+	if !identifierRe.MatchString(s) {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "identifier %q must match %v", s, identifierRe)
+	}
+	return nil
+}
+
+func reGroup(s string) string {
+	return `(?:` + s + `)`
+}
+
+func reAnchor(s string) string {
+	return `^` + s + `$`
+}
diff --git a/vendor/github.com/containerd/containerd/image.go b/vendor/github.com/containerd/containerd/image.go
new file mode 100644
index 0000000..b41037a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/image.go
@@ -0,0 +1,162 @@
+package containerd
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/containerd/rootfs"
+	"github.com/containerd/containerd/snapshot"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/identity"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// Image describes an image used by containers
+type Image interface {
+	// Name of the image
+	Name() string
+	// Target descriptor for the image content
+	Target() ocispec.Descriptor
+	// Unpack unpacks the image's content into a snapshot
+	Unpack(context.Context, string) error
+	// RootFS returns the unpacked diffids that make up images rootfs.
+	RootFS(ctx context.Context) ([]digest.Digest, error)
+	// Size returns the total size of the image's packed resources.
+	Size(ctx context.Context) (int64, error)
+	// Config descriptor for the image.
+	Config(ctx context.Context) (ocispec.Descriptor, error)
+}
+
+var _ = (Image)(&image{})
+
+type image struct {
+	client *Client
+
+	i images.Image
+}
+
+func (i *image) Name() string {
+	return i.i.Name
+}
+
+func (i *image) Target() ocispec.Descriptor {
+	return i.i.Target
+}
+
+func (i *image) RootFS(ctx context.Context) ([]digest.Digest, error) {
+	provider := i.client.ContentStore()
+	return i.i.RootFS(ctx, provider, platforms.Default())
+}
+
+func (i *image) Size(ctx context.Context) (int64, error) {
+	provider := i.client.ContentStore()
+	return i.i.Size(ctx, provider, platforms.Default())
+}
+
+func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) {
+	provider := i.client.ContentStore()
+	return i.i.Config(ctx, provider, platforms.Default())
+}
+
+func (i *image) Unpack(ctx context.Context, snapshotterName string) error {
+	layers, err := i.getLayers(ctx, platforms.Default())
+	if err != nil {
+		return err
+	}
+
+	var (
+		sn = i.client.SnapshotService(snapshotterName)
+		a  = i.client.DiffService()
+		cs = i.client.ContentStore()
+
+		chain    []digest.Digest
+		unpacked bool
+	)
+	for _, layer := range layers {
+		labels := map[string]string{
+			"containerd.io/gc.root":      time.Now().UTC().Format(time.RFC3339),
+			"containerd.io/uncompressed": layer.Diff.Digest.String(),
+		}
+		lastUnpacked := unpacked
+
+		unpacked, err = rootfs.ApplyLayer(ctx, layer, chain, sn, a, snapshot.WithLabels(labels))
+		if err != nil {
+			return err
+		}
+
+		if lastUnpacked {
+			info := snapshot.Info{
+				Name: identity.ChainID(chain).String(),
+			}
+
+			// Remove previously created gc.root label
+			if _, err := sn.Update(ctx, info, "labels.containerd.io/gc.root"); err != nil {
+				return err
+			}
+		}
+
+		chain = append(chain, layer.Diff.Digest)
+	}
+
+	if unpacked {
+		desc, err := i.i.Config(ctx, cs, platforms.Default())
+		if err != nil {
+			return err
+		}
+
+		rootfs := identity.ChainID(chain).String()
+
+		cinfo := content.Info{
+			Digest: desc.Digest,
+			Labels: map[string]string{
+				fmt.Sprintf("containerd.io/gc.ref.snapshot.%s", snapshotterName): rootfs,
+			},
+		}
+		if _, err := cs.Update(ctx, cinfo, fmt.Sprintf("labels.containerd.io/gc.ref.snapshot.%s", snapshotterName)); err != nil {
+			return err
+		}
+
+		sinfo := snapshot.Info{
+			Name: rootfs,
+		}
+
+		// Config now referenced snapshot, release root reference
+		if _, err := sn.Update(ctx, sinfo, "labels.containerd.io/gc.root"); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (i *image) getLayers(ctx context.Context, platform string) ([]rootfs.Layer, error) {
+	cs := i.client.ContentStore()
+
+	manifest, err := images.Manifest(ctx, cs, i.i.Target, platform)
+	if err != nil {
+		return nil, errors.Wrap(err, "")
+	}
+
+	diffIDs, err := i.i.RootFS(ctx, cs, platform)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to resolve rootfs")
+	}
+	if len(diffIDs) != len(manifest.Layers) {
+		return nil, errors.Errorf("mismatched image rootfs and manifest layers")
+	}
+	layers := make([]rootfs.Layer, len(diffIDs))
+	for i := range diffIDs {
+		layers[i].Diff = ocispec.Descriptor{
+			// TODO: derive media type from compressed type
+			MediaType: ocispec.MediaTypeImageLayer,
+			Digest:    diffIDs[i],
+		}
+		layers[i].Blob = manifest.Layers[i]
+	}
+	return layers, nil
+}
diff --git a/vendor/github.com/containerd/containerd/images/handlers.go b/vendor/github.com/containerd/containerd/images/handlers.go
new file mode 100644
index 0000000..63acdb7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/images/handlers.go
@@ -0,0 +1,135 @@
+package images
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/containerd/containerd/content"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
+)
+
+var (
+	// ErrSkipDesc is used to skip processing of a descriptor and
+	// its descendants.
+	ErrSkipDesc = fmt.Errorf("skip descriptor")
+
+	// ErrStopHandler is used to signify that the descriptor
+	// has been handled and should not be handled further.
+	// This applies only to a single descriptor in a handler
+	// chain and does not apply to descendant descriptors.
+	ErrStopHandler = fmt.Errorf("stop handler")
+)
+
+// Handler handles image manifests
+type Handler interface {
+	Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
+}
+
+// HandlerFunc function implementing the Handler interface
+type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error)
+
+// Handle image manifests
+func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
+	return fn(ctx, desc)
+}
+
+// Handlers returns a handler that will run the handlers in sequence.
+//
+// A handler may return `ErrStopHandler` to stop calling additional handlers
+func Handlers(handlers ...Handler) HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
+		var children []ocispec.Descriptor
+		for _, handler := range handlers {
+			ch, err := handler.Handle(ctx, desc)
+			if err != nil {
+				if errors.Cause(err) == ErrStopHandler {
+					break
+				}
+				return nil, err
+			}
+
+			children = append(children, ch...)
+		}
+
+		return children, nil
+	}
+}
+
+// Walk the resources of an image and call the handler for each. If the handler
+// decodes the sub-resources for each image, they will be visited as well.
+//
+// This differs from dispatch in that each sibling resource is considered
+// synchronously.
+func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
+	for _, desc := range descs {
+
+		children, err := handler.Handle(ctx, desc)
+		if err != nil {
+			if errors.Cause(err) == ErrSkipDesc {
+				continue // don't traverse the children.
+			}
+			return err
+		}
+
+		if len(children) > 0 {
+			if err := Walk(ctx, handler, children...); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// Dispatch runs the provided handler for content specified by the descriptors.
+// If the handler decodes subresources, they will be visited as well.
+//
+// Handlers for siblings are run in parallel on the provided descriptors. A
+// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse
+// any children.
+//
+// Typically, this function will be used with `FetchHandler`, often composed
+// with other handlers.
+//
+// If any handler returns an error, the dispatch session will be canceled.
+func Dispatch(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error {
+	eg, ctx := errgroup.WithContext(ctx)
+	for _, desc := range descs {
+		desc := desc
+
+		eg.Go(func() error {
+			desc := desc
+
+			children, err := handler.Handle(ctx, desc)
+			if err != nil {
+				if errors.Cause(err) == ErrSkipDesc {
+					return nil // don't traverse the children.
+				}
+				return err
+			}
+
+			if len(children) > 0 {
+				return Dispatch(ctx, handler, children...)
+			}
+
+			return nil
+		})
+	}
+
+	return eg.Wait()
+}
+
+// ChildrenHandler decodes well-known manifest types and returns their children.
+//
+// This is useful for supporting recursive fetch and other use cases where you
+// want to do a full walk of resources.
+//
+// One can also replace this with another implementation to allow descending of
+// arbitrary types.
+func ChildrenHandler(provider content.Provider, platform string) HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		return Children(ctx, provider, desc, platform)
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go
new file mode 100644
index 0000000..4c78c6c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/images/image.go
@@ -0,0 +1,352 @@
+package images
+
+import (
+	"context"
+	"encoding/json"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/platforms"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// Image provides the model for how containerd views container images.
+type Image struct {
+	// Name of the image.
+	//
+	// To be pulled, it must be a reference compatible with resolvers.
+	//
+	// This field is required.
+	Name string
+
+	// Labels provide runtime decoration for the image record.
+	//
+	// There is no default behavior for how these labels are propagated. They
+	// only decorate the static metadata object.
+	//
+	// This field is optional.
+	Labels map[string]string
+
+	// Target describes the root content for this image. Typically, this is
+	// a manifest, index or manifest list.
+	Target ocispec.Descriptor
+
+	CreatedAt, UpdatedAt time.Time
+}
+
+// Store and interact with images
+type Store interface {
+	Get(ctx context.Context, name string) (Image, error)
+	List(ctx context.Context, filters ...string) ([]Image, error)
+	Create(ctx context.Context, image Image) (Image, error)
+
+	// Update will replace the data in the store with the provided image. If
+	// one or more fieldpaths are provided, only those fields will be updated.
+	Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error)
+
+	Delete(ctx context.Context, name string) error
+}
+
+// TODO(stevvooe): Many of these functions make strong platform assumptions,
+// which are untrue in a lot of cases. More refactoring must be done here to
+// make this work in all cases.
+
+// Config resolves the image configuration descriptor.
+//
+// The caller can then use the descriptor to resolve and process the
+// configuration of the image.
+func (image *Image) Config(ctx context.Context, provider content.Provider, platform string) (ocispec.Descriptor, error) {
+	return Config(ctx, provider, image.Target, platform)
+}
+
+// RootFS returns the unpacked diffids that make up an image's rootfs.
+//
+// These are used to verify that a set of layers unpacked to the expected
+// values.
+func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform string) ([]digest.Digest, error) {
+	desc, err := image.Config(ctx, provider, platform)
+	if err != nil {
+		return nil, err
+	}
+	return RootFS(ctx, provider, desc)
+}
+
+// Size returns the total size of an image's packed resources.
+func (image *Image) Size(ctx context.Context, provider content.Provider, platform string) (int64, error) {
+	var size int64
+	return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		if desc.Size < 0 {
+			return nil, errors.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
+		}
+		size += desc.Size
+		return nil, nil
+	}), ChildrenHandler(provider, platform)), image.Target)
+}
+
+// Manifest resolves a manifest from the image for the given platform.
+//
+// TODO(stevvooe): This violates the current platform agnostic approach to this
+// package by returning a specific manifest type. We'll need to refactor this
+// to return a manifest descriptor or decide that we want to bring the API in
+// this direction because this abstraction is not needed.
+func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform string) (ocispec.Manifest, error) {
+	var (
+		matcher platforms.Matcher
+		m       *ocispec.Manifest
+		err     error
+	)
+	if platform != "" {
+		matcher, err = platforms.Parse(platform)
+		if err != nil {
+			return ocispec.Manifest{}, err
+		}
+	}
+
+	if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		switch desc.MediaType {
+		case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+			p, err := content.ReadBlob(ctx, provider, desc.Digest)
+			if err != nil {
+				return nil, err
+			}
+
+			var manifest ocispec.Manifest
+			if err := json.Unmarshal(p, &manifest); err != nil {
+				return nil, err
+			}
+
+			if platform != "" {
+				if desc.Platform != nil && !matcher.Match(*desc.Platform) {
+					return nil, nil
+				}
+
+				if desc.Platform == nil {
+					p, err := content.ReadBlob(ctx, provider, manifest.Config.Digest)
+					if err != nil {
+						return nil, err
+					}
+
+					var image ocispec.Image
+					if err := json.Unmarshal(p, &image); err != nil {
+						return nil, err
+					}
+
+					if !matcher.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) {
+						return nil, nil
+					}
+
+				}
+			}
+
+			m = &manifest
+
+			return nil, nil
+		case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+			p, err := content.ReadBlob(ctx, provider, desc.Digest)
+			if err != nil {
+				return nil, err
+			}
+
+			var idx ocispec.Index
+			if err := json.Unmarshal(p, &idx); err != nil {
+				return nil, err
+			}
+
+			if platform == "" {
+				return idx.Manifests, nil
+			}
+
+			var descs []ocispec.Descriptor
+			for _, d := range idx.Manifests {
+				if d.Platform == nil || matcher.Match(*d.Platform) {
+					descs = append(descs, d)
+				}
+			}
+
+			return descs, nil
+
+		}
+		return nil, errors.Wrap(errdefs.ErrNotFound, "could not resolve manifest")
+	}), image); err != nil {
+		return ocispec.Manifest{}, err
+	}
+
+	if m == nil {
+		return ocispec.Manifest{}, errors.Wrap(errdefs.ErrNotFound, "manifest not found")
+	}
+
+	return *m, nil
+}
+
+// Config resolves the image configuration descriptor using a content provided
+// to resolve child resources on the image.
+//
+// The caller can then use the descriptor to resolve and process the
+// configuration of the image.
+func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform string) (ocispec.Descriptor, error) {
+	manifest, err := Manifest(ctx, provider, image, platform)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+	return manifest.Config, err
+}
+
+// Platforms returns one or more platforms supported by the image.
+func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) {
+	var platformSpecs []ocispec.Platform
+	return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		if desc.Platform != nil {
+			platformSpecs = append(platformSpecs, *desc.Platform)
+			return nil, ErrSkipDesc
+		}
+
+		switch desc.MediaType {
+		case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
+			p, err := content.ReadBlob(ctx, provider, desc.Digest)
+			if err != nil {
+				return nil, err
+			}
+
+			var image ocispec.Image
+			if err := json.Unmarshal(p, &image); err != nil {
+				return nil, err
+			}
+
+			platformSpecs = append(platformSpecs,
+				platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture}))
+		}
+		return nil, nil
+	}), ChildrenHandler(provider, "")), image)
+}
+
+// Check returns nil if all components of an image are available in the
+// provider for the specified platform.
+//
+// If available is true, the caller can assume that required represents the
+// complete set of content required for the image.
+//
+// missing will have the components that are part of required but not available
+// in the provider.
+//
+// If there is a problem resolving content, an error will be returned.
+func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform string) (available bool, required, present, missing []ocispec.Descriptor, err error) {
+	mfst, err := Manifest(ctx, provider, image, platform)
+	if err != nil {
+		if errdefs.IsNotFound(err) {
+			return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
+		}
+
+		return false, nil, nil, nil, errors.Wrap(err, "image check failed")
+	}
+
+// TODO(stevvooe): It is possible that referenced components could have
+	// children, but this is rare. For now, we ignore this and only verify
+// that manifest components are present.
+	required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...)
+
+	for _, desc := range required {
+		ra, err := provider.ReaderAt(ctx, desc.Digest)
+		if err != nil {
+			if errdefs.IsNotFound(err) {
+				missing = append(missing, desc)
+				continue
+			} else {
+				return false, nil, nil, nil, err
+			}
+		}
+		ra.Close()
+		present = append(present, desc)
+
+	}
+
+	return true, required, present, missing, nil
+}
+
+// Children returns the immediate children of content described by the descriptor.
+func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor, platform string) ([]ocispec.Descriptor, error) {
+	var descs []ocispec.Descriptor
+	switch desc.MediaType {
+	case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+		p, err := content.ReadBlob(ctx, provider, desc.Digest)
+		if err != nil {
+			return nil, err
+		}
+
+		// TODO(stevvooe): We just assume oci manifest, for now. There may be
+		// subtle differences from the docker version.
+		var manifest ocispec.Manifest
+		if err := json.Unmarshal(p, &manifest); err != nil {
+			return nil, err
+		}
+
+		descs = append(descs, manifest.Config)
+		descs = append(descs, manifest.Layers...)
+	case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+		p, err := content.ReadBlob(ctx, provider, desc.Digest)
+		if err != nil {
+			return nil, err
+		}
+
+		var index ocispec.Index
+		if err := json.Unmarshal(p, &index); err != nil {
+			return nil, err
+		}
+
+		if platform != "" {
+			matcher, err := platforms.Parse(platform)
+			if err != nil {
+				return nil, err
+			}
+
+			for _, d := range index.Manifests {
+				if d.Platform == nil || matcher.Match(*d.Platform) {
+					descs = append(descs, d)
+				}
+			}
+		} else {
+			descs = append(descs, index.Manifests...)
+		}
+
+	case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
+		MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip,
+		MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
+		ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
+		ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip,
+		MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig:
+		// childless data types.
+		return nil, nil
+	default:
+		log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
+	}
+
+	return descs, nil
+}
+
+// RootFS returns the unpacked diffids that make up an image's rootfs.
+//
+// These are used to verify that a set of layers unpacked to the expected
+// values.
+func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) {
+	p, err := content.ReadBlob(ctx, provider, configDesc.Digest)
+	if err != nil {
+		return nil, err
+	}
+
+	var config ocispec.Image
+	if err := json.Unmarshal(p, &config); err != nil {
+		return nil, err
+	}
+
+	// TODO(stevvooe): Remove this bit when OCI structure uses correct type for
+	// rootfs.DiffIDs.
+	var diffIDs []digest.Digest
+	for _, diffID := range config.RootFS.DiffIDs {
+		diffIDs = append(diffIDs, digest.Digest(diffID))
+	}
+
+	return diffIDs, nil
+}
diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go
new file mode 100644
index 0000000..f01f615
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/images/mediatypes.go
@@ -0,0 +1,23 @@
+package images
+
+// mediatype definitions for image components handled in containerd.
+//
+// oci components are generally referenced directly, although we may centralize
+// here for clarity.
+const (
+	MediaTypeDockerSchema2Layer            = "application/vnd.docker.image.rootfs.diff.tar"
+	MediaTypeDockerSchema2LayerForeign     = "application/vnd.docker.image.rootfs.foreign.diff.tar"
+	MediaTypeDockerSchema2LayerGzip        = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+	MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+	MediaTypeDockerSchema2Config           = "application/vnd.docker.container.image.v1+json"
+	MediaTypeDockerSchema2Manifest         = "application/vnd.docker.distribution.manifest.v2+json"
+	MediaTypeDockerSchema2ManifestList     = "application/vnd.docker.distribution.manifest.list.v2+json"
+	// Checkpoint/Restore Media Types
+	MediaTypeContainerd1Checkpoint        = "application/vnd.containerd.container.criu.checkpoint.criu.tar"
+	MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar"
+	MediaTypeContainerd1Resource          = "application/vnd.containerd.container.resource.tar"
+	MediaTypeContainerd1RW                = "application/vnd.containerd.container.rw.tar"
+	MediaTypeContainerd1CheckpointConfig  = "application/vnd.containerd.container.checkpoint.config.v1+proto"
+	// Legacy Docker schema1 manifest
+	MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+)
diff --git a/vendor/github.com/containerd/containerd/import.go b/vendor/github.com/containerd/containerd/import.go
new file mode 100644
index 0000000..9f8f9af
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/import.go
@@ -0,0 +1,120 @@
+package containerd
+
+import (
+	"archive/tar"
+	"context"
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"strings"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/reference"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+func resolveOCIIndex(idx ocispec.Index, refObject string) (*ocispec.Descriptor, error) {
+	tag, dgst := reference.SplitObject(refObject)
+	if tag == "" && dgst == "" {
+		return nil, errors.Errorf("unexpected object: %q", refObject)
+	}
+	for _, m := range idx.Manifests {
+		if m.Digest == dgst {
+			return &m, nil
+		}
+		annot, ok := m.Annotations[ocispec.AnnotationRefName]
+		if ok && annot == tag && tag != "" {
+			return &m, nil
+		}
+	}
+	return nil, errors.Errorf("not found: %q", refObject)
+}
+
+func (c *Client) importFromOCITar(ctx context.Context, ref string, reader io.Reader, iopts importOpts) (Image, error) {
+	tr := tar.NewReader(reader)
+	store := c.ContentStore()
+	var desc *ocispec.Descriptor
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		if hdr.Typeflag != tar.TypeReg && hdr.Typeflag != tar.TypeRegA {
+			continue
+		}
+		if hdr.Name == "index.json" {
+			desc, err = onUntarIndexJSON(tr, iopts.refObject)
+			if err != nil {
+				return nil, err
+			}
+			continue
+		}
+		if strings.HasPrefix(hdr.Name, "blobs/") {
+			if err := onUntarBlob(ctx, tr, store, hdr.Name, hdr.Size); err != nil {
+				return nil, err
+			}
+		}
+	}
+	if desc == nil {
+		return nil, errors.Errorf("no descriptor found for reference object %q", iopts.refObject)
+	}
+	imgrec := images.Image{
+		Name:   ref,
+		Target: *desc,
+		Labels: iopts.labels,
+	}
+	is := c.ImageService()
+	if updated, err := is.Update(ctx, imgrec, "target"); err != nil {
+		if !errdefs.IsNotFound(err) {
+			return nil, err
+		}
+
+		created, err := is.Create(ctx, imgrec)
+		if err != nil {
+			return nil, err
+		}
+
+		imgrec = created
+	} else {
+		imgrec = updated
+	}
+
+	img := &image{
+		client: c,
+		i:      imgrec,
+	}
+	return img, nil
+}
+
+func onUntarIndexJSON(r io.Reader, refObject string) (*ocispec.Descriptor, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+	var idx ocispec.Index
+	if err := json.Unmarshal(b, &idx); err != nil {
+		return nil, err
+	}
+	return resolveOCIIndex(idx, refObject)
+}
+
+func onUntarBlob(ctx context.Context, r io.Reader, store content.Store, name string, size int64) error {
+	// name is like "blobs/sha256/deadbeef"
+	split := strings.Split(name, "/")
+	if len(split) != 3 {
+		return errors.Errorf("unexpected name: %q", name)
+	}
+	algo := digest.Algorithm(split[1])
+	if !algo.Available() {
+		return errors.Errorf("unsupported algorithm: %s", algo)
+	}
+	dgst := digest.NewDigestFromHex(algo.String(), split[2])
+	return content.WriteBlob(ctx, store, "unknown-"+dgst.String(), r, size, dgst)
+}
diff --git a/vendor/github.com/containerd/containerd/io.go b/vendor/github.com/containerd/containerd/io.go
new file mode 100644
index 0000000..48c06f1
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/io.go
@@ -0,0 +1,196 @@
+package containerd
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"sync"
+)
+
+// IOConfig holds the io configurations.
+type IOConfig struct {
+	// Terminal is true if one has been allocated
+	Terminal bool
+	// Stdin path
+	Stdin string
+	// Stdout path
+	Stdout string
+	// Stderr path
+	Stderr string
+}
+
+// IO holds the io information for a task or process
+type IO interface {
+	// Config returns the IO configuration.
+	Config() IOConfig
+	// Cancel aborts all current io operations
+	Cancel()
+	// Wait blocks until all io copy operations have completed
+	Wait()
+	// Close cleans up all open io resources
+	Close() error
+}
+
+// cio is a basic container IO implementation.
+type cio struct {
+	config IOConfig
+
+	closer *wgCloser
+}
+
+func (c *cio) Config() IOConfig {
+	return c.config
+}
+
+func (c *cio) Cancel() {
+	if c.closer == nil {
+		return
+	}
+	c.closer.Cancel()
+}
+
+func (c *cio) Wait() {
+	if c.closer == nil {
+		return
+	}
+	c.closer.Wait()
+}
+
+func (c *cio) Close() error {
+	if c.closer == nil {
+		return nil
+	}
+	return c.closer.Close()
+}
+
+// IOCreation creates new IO sets for a task
+type IOCreation func(id string) (IO, error)
+
+// IOAttach allows callers to reattach to running tasks
+//
+// There should only be one reader for a task's IO set
+// because fifos can only be read from one reader or the output
+// will be sent only to the first reader
+type IOAttach func(*FIFOSet) (IO, error)
+
+// NewIO returns an IOCreation that will provide IO sets without a terminal
+func NewIO(stdin io.Reader, stdout, stderr io.Writer) IOCreation {
+	return NewIOWithTerminal(stdin, stdout, stderr, false)
+}
+
+// NewIOWithTerminal creates a new io set with the provided io.Reader/Writers for use with a terminal
+func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool) IOCreation {
+	return func(id string) (_ IO, err error) {
+		paths, err := NewFifos(id)
+		if err != nil {
+			return nil, err
+		}
+		defer func() {
+			if err != nil && paths.Dir != "" {
+				os.RemoveAll(paths.Dir)
+			}
+		}()
+		cfg := IOConfig{
+			Terminal: terminal,
+			Stdout:   paths.Out,
+			Stderr:   paths.Err,
+			Stdin:    paths.In,
+		}
+		i := &cio{config: cfg}
+		set := &ioSet{
+			in:  stdin,
+			out: stdout,
+			err: stderr,
+		}
+		closer, err := copyIO(paths, set, cfg.Terminal)
+		if err != nil {
+			return nil, err
+		}
+		i.closer = closer
+		return i, nil
+	}
+}
+
+// WithAttach attaches the existing io for a task to the provided io.Reader/Writers
+func WithAttach(stdin io.Reader, stdout, stderr io.Writer) IOAttach {
+	return func(paths *FIFOSet) (IO, error) {
+		if paths == nil {
+			return nil, fmt.Errorf("cannot attach to existing fifos")
+		}
+		cfg := IOConfig{
+			Terminal: paths.Terminal,
+			Stdout:   paths.Out,
+			Stderr:   paths.Err,
+			Stdin:    paths.In,
+		}
+		i := &cio{config: cfg}
+		set := &ioSet{
+			in:  stdin,
+			out: stdout,
+			err: stderr,
+		}
+		closer, err := copyIO(paths, set, cfg.Terminal)
+		if err != nil {
+			return nil, err
+		}
+		i.closer = closer
+		return i, nil
+	}
+}
+
+// Stdio returns an IO set to be used for a task
+// that outputs the container's IO as the current processes Stdio
+func Stdio(id string) (IO, error) {
+	return NewIO(os.Stdin, os.Stdout, os.Stderr)(id)
+}
+
+// StdioTerminal will setup the IO for the task to use a terminal
+func StdioTerminal(id string) (IO, error) {
+	return NewIOWithTerminal(os.Stdin, os.Stdout, os.Stderr, true)(id)
+}
+
+// NullIO redirects the container's IO into /dev/null
+func NullIO(id string) (IO, error) {
+	return &cio{}, nil
+}
+
+// FIFOSet is a set of fifos for use with tasks
+type FIFOSet struct {
+	// Dir is the directory holding the task fifos
+	Dir string
+	// In, Out, and Err fifo paths
+	In, Out, Err string
+	// Terminal returns true if a terminal is being used for the task
+	Terminal bool
+}
+
+type ioSet struct {
+	in       io.Reader
+	out, err io.Writer
+}
+
+type wgCloser struct {
+	wg     *sync.WaitGroup
+	dir    string
+	set    []io.Closer
+	cancel context.CancelFunc
+}
+
+func (g *wgCloser) Wait() {
+	g.wg.Wait()
+}
+
+func (g *wgCloser) Close() error {
+	for _, f := range g.set {
+		f.Close()
+	}
+	if g.dir != "" {
+		return os.RemoveAll(g.dir)
+	}
+	return nil
+}
+
+func (g *wgCloser) Cancel() {
+	g.cancel()
+}
diff --git a/vendor/github.com/containerd/containerd/io_unix.go b/vendor/github.com/containerd/containerd/io_unix.go
new file mode 100644
index 0000000..432553f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/io_unix.go
@@ -0,0 +1,184 @@
+// +build !windows
+
+package containerd
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"sync"
+	"syscall"
+
+	"github.com/containerd/fifo"
+)
+
+// NewFifos returns a new set of fifos for the task
+func NewFifos(id string) (*FIFOSet, error) {
+	root := filepath.Join(os.TempDir(), "containerd")
+	if err := os.MkdirAll(root, 0700); err != nil {
+		return nil, err
+	}
+	dir, err := ioutil.TempDir(root, "")
+	if err != nil {
+		return nil, err
+	}
+	return &FIFOSet{
+		Dir: dir,
+		In:  filepath.Join(dir, id+"-stdin"),
+		Out: filepath.Join(dir, id+"-stdout"),
+		Err: filepath.Join(dir, id+"-stderr"),
+	}, nil
+}
+
+func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
+	var (
+		f           io.ReadWriteCloser
+		set         []io.Closer
+		ctx, cancel = context.WithCancel(context.Background())
+		wg          = &sync.WaitGroup{}
+	)
+	defer func() {
+		if err != nil {
+			for _, f := range set {
+				f.Close()
+			}
+			cancel()
+		}
+	}()
+
+	if f, err = fifo.OpenFifo(ctx, fifos.In, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
+		return nil, err
+	}
+	set = append(set, f)
+	go func(w io.WriteCloser) {
+		io.Copy(w, ioset.in)
+		w.Close()
+	}(f)
+
+	if f, err = fifo.OpenFifo(ctx, fifos.Out, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
+		return nil, err
+	}
+	set = append(set, f)
+	wg.Add(1)
+	go func(r io.ReadCloser) {
+		io.Copy(ioset.out, r)
+		r.Close()
+		wg.Done()
+	}(f)
+
+	if f, err = fifo.OpenFifo(ctx, fifos.Err, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
+		return nil, err
+	}
+	set = append(set, f)
+
+	if !tty {
+		wg.Add(1)
+		go func(r io.ReadCloser) {
+			io.Copy(ioset.err, r)
+			r.Close()
+			wg.Done()
+		}(f)
+	}
+	return &wgCloser{
+		wg:     wg,
+		dir:    fifos.Dir,
+		set:    set,
+		cancel: cancel,
+	}, nil
+}
+
+// NewDirectIO returns an IO implementation that exposes the pipes directly
+func NewDirectIO(ctx context.Context, terminal bool) (*DirectIO, error) {
+	set, err := NewFifos("")
+	if err != nil {
+		return nil, err
+	}
+	f := &DirectIO{
+		set:      set,
+		terminal: terminal,
+	}
+	defer func() {
+		if err != nil {
+			f.Delete()
+		}
+	}()
+	if f.Stdin, err = fifo.OpenFifo(ctx, set.In, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
+		return nil, err
+	}
+	if f.Stdout, err = fifo.OpenFifo(ctx, set.Out, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
+		f.Stdin.Close()
+		return nil, err
+	}
+	if f.Stderr, err = fifo.OpenFifo(ctx, set.Err, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
+		f.Stdin.Close()
+		f.Stdout.Close()
+		return nil, err
+	}
+	return f, nil
+}
+
+// DirectIO allows task IO to be handled externally by the caller
+type DirectIO struct {
+	Stdin  io.WriteCloser
+	Stdout io.ReadCloser
+	Stderr io.ReadCloser
+
+	set      *FIFOSet
+	terminal bool
+}
+
+// IOCreate returns IO available for use with task creation
+func (f *DirectIO) IOCreate(id string) (IO, error) {
+	return f, nil
+}
+
+// IOAttach returns IO available for use with task attachment
+func (f *DirectIO) IOAttach(set *FIFOSet) (IO, error) {
+	return f, nil
+}
+
+// Config returns the IOConfig
+func (f *DirectIO) Config() IOConfig {
+	return IOConfig{
+		Terminal: f.terminal,
+		Stdin:    f.set.In,
+		Stdout:   f.set.Out,
+		Stderr:   f.set.Err,
+	}
+}
+
+// Cancel stops any IO copy operations
+//
+// Not applicable for DirectIO
+func (f *DirectIO) Cancel() {
+	// nothing to cancel as all operations are handled externally
+}
+
+// Wait on any IO copy operations
+//
+// Not applicable for DirectIO
+func (f *DirectIO) Wait() {
+	// nothing to wait on as all operations are handled externally
+}
+
+// Close closes all open fds
+func (f *DirectIO) Close() error {
+	err := f.Stdin.Close()
+	if err2 := f.Stdout.Close(); err == nil {
+		err = err2
+	}
+	if err2 := f.Stderr.Close(); err == nil {
+		err = err2
+	}
+	return err
+}
+
+// Delete removes the underlying directory containing fifos
+func (f *DirectIO) Delete() error {
+	if f.set.Dir == "" {
+		return nil
+	}
+	return os.RemoveAll(f.set.Dir)
+}
diff --git a/vendor/github.com/containerd/containerd/io_windows.go b/vendor/github.com/containerd/containerd/io_windows.go
new file mode 100644
index 0000000..e37568c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/io_windows.go
@@ -0,0 +1,117 @@
+package containerd
+
+import (
+	"fmt"
+	"io"
+	"net"
+	"sync"
+
+	winio "github.com/Microsoft/go-winio"
+	"github.com/containerd/containerd/log"
+	"github.com/pkg/errors"
+)
+
+const pipeRoot = `\\.\pipe`
+
+// NewFifos returns a new set of fifos for the task
+func NewFifos(id string) (*FIFOSet, error) {
+	return &FIFOSet{
+		In:  fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id),
+		Out: fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id),
+		Err: fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id),
+	}, nil
+}
+
+func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
+	var (
+		wg  sync.WaitGroup
+		set []io.Closer
+	)
+
+	if fifos.In != "" {
+		l, err := winio.ListenPipe(fifos.In, nil)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to create stdin pipe %s", fifos.In)
+		}
+		defer func(l net.Listener) {
+			if err != nil {
+				l.Close()
+			}
+		}(l)
+		set = append(set, l)
+
+		go func() {
+			c, err := l.Accept()
+			if err != nil {
+				log.L.WithError(err).Errorf("failed to accept stdin connection on %s", fifos.In)
+				return
+			}
+			io.Copy(c, ioset.in)
+			c.Close()
+			l.Close()
+		}()
+	}
+
+	if fifos.Out != "" {
+		l, err := winio.ListenPipe(fifos.Out, nil)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to create stdout pipe %s", fifos.Out)
+		}
+		defer func(l net.Listener) {
+			if err != nil {
+				l.Close()
+			}
+		}(l)
+		set = append(set, l)
+
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			c, err := l.Accept()
+			if err != nil {
+				log.L.WithError(err).Errorf("failed to accept stdout connection on %s", fifos.Out)
+				return
+			}
+			io.Copy(ioset.out, c)
+			c.Close()
+			l.Close()
+		}()
+	}
+
+	if !tty && fifos.Err != "" {
+		l, err := winio.ListenPipe(fifos.Err, nil)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to create stderr pipe %s", fifos.Err)
+		}
+		defer func(l net.Listener) {
+			if err != nil {
+				l.Close()
+			}
+		}(l)
+		set = append(set, l)
+
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			c, err := l.Accept()
+			if err != nil {
+				log.L.WithError(err).Errorf("failed to accept stderr connection on %s", fifos.Err)
+				return
+			}
+			io.Copy(ioset.err, c)
+			c.Close()
+			l.Close()
+		}()
+	}
+
+	return &wgCloser{
+		wg:  &wg,
+		dir: fifos.Dir,
+		set: set,
+		cancel: func() {
+			for _, l := range set {
+				l.Close()
+			}
+		},
+	}, nil
+}
diff --git a/vendor/github.com/containerd/containerd/labels/validate.go b/vendor/github.com/containerd/containerd/labels/validate.go
new file mode 100644
index 0000000..b05fe18
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/labels/validate.go
@@ -0,0 +1,21 @@
+package labels
+
+import (
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+)
+
+const (
+	maxSize = 4096
+)
+
+// Validate a label's key and value are under 4096 bytes
+func Validate(k, v string) error {
+	if (len(k) + len(v)) > maxSize {
+		if len(k) > 10 {
+			k = k[:10]
+		}
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "label key and value greater than maximum size (%d bytes), key: %s", maxSize, k)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/linux/bundle.go b/vendor/github.com/containerd/containerd/linux/bundle.go
new file mode 100644
index 0000000..3dbfef9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/bundle.go
@@ -0,0 +1,140 @@
+// +build linux
+
+package linux
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/containerd/containerd/events"
+	"github.com/containerd/containerd/linux/runcopts"
+	client "github.com/containerd/containerd/linux/shim"
+	"github.com/pkg/errors"
+)
+
+// loadBundle loads an existing bundle from disk
+func loadBundle(id, path, workdir string) *bundle {
+	return &bundle{
+		id:      id,
+		path:    path,
+		workDir: workdir,
+	}
+}
+
+// newBundle creates a new bundle on disk at the provided path for the given id
+func newBundle(id, path, workDir string, spec []byte) (b *bundle, err error) {
+	if err := os.MkdirAll(path, 0711); err != nil {
+		return nil, err
+	}
+	path = filepath.Join(path, id)
+	defer func() {
+		if err != nil {
+			os.RemoveAll(path)
+		}
+	}()
+	workDir = filepath.Join(workDir, id)
+	if err := os.MkdirAll(workDir, 0711); err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err != nil {
+			os.RemoveAll(workDir)
+		}
+	}()
+
+	if err := os.Mkdir(path, 0711); err != nil {
+		return nil, err
+	}
+	if err := os.Mkdir(filepath.Join(path, "rootfs"), 0711); err != nil {
+		return nil, err
+	}
+	f, err := os.Create(filepath.Join(path, configFilename))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	_, err = io.Copy(f, bytes.NewReader(spec))
+	return &bundle{
+		id:      id,
+		path:    path,
+		workDir: workDir,
+	}, err
+}
+
+type bundle struct {
+	id      string
+	path    string
+	workDir string
+}
+
+type shimOpt func(*bundle, string, *runcopts.RuncOptions) (client.Config, client.ClientOpt)
+
+// ShimRemote is a shimOpt for connecting and starting a remote shim
+func ShimRemote(shim, daemonAddress, cgroup string, nonewns, debug bool, exitHandler func()) shimOpt {
+	return func(b *bundle, ns string, ropts *runcopts.RuncOptions) (client.Config, client.ClientOpt) {
+		return b.shimConfig(ns, ropts),
+			client.WithStart(shim, b.shimAddress(ns), daemonAddress, cgroup, nonewns, debug, exitHandler)
+	}
+}
+
+// ShimLocal is a shimOpt for using an in process shim implementation
+func ShimLocal(exchange *events.Exchange) shimOpt {
+	return func(b *bundle, ns string, ropts *runcopts.RuncOptions) (client.Config, client.ClientOpt) {
+		return b.shimConfig(ns, ropts), client.WithLocal(exchange)
+	}
+}
+
+// ShimConnect is a shimOpt for connecting to an existing remote shim
+func ShimConnect() shimOpt {
+	return func(b *bundle, ns string, ropts *runcopts.RuncOptions) (client.Config, client.ClientOpt) {
+		return b.shimConfig(ns, ropts), client.WithConnect(b.shimAddress(ns))
+	}
+}
+
+// NewShimClient connects to the shim managing the bundle and tasks creating it if needed
+func (b *bundle) NewShimClient(ctx context.Context, namespace string, getClientOpts shimOpt, runcOpts *runcopts.RuncOptions) (*client.Client, error) {
+	cfg, opt := getClientOpts(b, namespace, runcOpts)
+	return client.New(ctx, cfg, opt)
+}
+
+// Delete deletes the bundle from disk
+func (b *bundle) Delete() error {
+	err := os.RemoveAll(b.path)
+	if err == nil {
+		return os.RemoveAll(b.workDir)
+	}
+	// error removing the bundle path; still attempt removing work dir
+	err2 := os.RemoveAll(b.workDir)
+	if err2 == nil {
+		return err
+	}
+	return errors.Wrapf(err, "Failed to remove both bundle and workdir locations: %v", err2)
+}
+
+func (b *bundle) shimAddress(namespace string) string {
+	return filepath.Join(string(filepath.Separator), "containerd-shim", namespace, b.id, "shim.sock")
+}
+
+func (b *bundle) shimConfig(namespace string, runcOptions *runcopts.RuncOptions) client.Config {
+	var (
+		criuPath      string
+		runtimeRoot   string
+		systemdCgroup bool
+	)
+	if runcOptions != nil {
+		criuPath = runcOptions.CriuPath
+		systemdCgroup = runcOptions.SystemdCgroup
+		runtimeRoot = runcOptions.RuntimeRoot
+	}
+	return client.Config{
+		Path:          b.path,
+		WorkDir:       b.workDir,
+		Namespace:     namespace,
+		Criu:          criuPath,
+		RuntimeRoot:   runtimeRoot,
+		SystemdCgroup: systemdCgroup,
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/linux/process.go b/vendor/github.com/containerd/containerd/linux/process.go
new file mode 100644
index 0000000..0febff9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/process.go
@@ -0,0 +1,120 @@
+// +build linux
+
+package linux
+
+import (
+	"context"
+
+	"github.com/containerd/containerd/api/types/task"
+	"github.com/containerd/containerd/errdefs"
+	shim "github.com/containerd/containerd/linux/shim/v1"
+	"github.com/containerd/containerd/runtime"
+)
+
+// Process implements a linux process
+type Process struct {
+	id string
+	t  *Task
+}
+
+// ID of the process
+func (p *Process) ID() string {
+	return p.id
+}
+
+// Kill sends the provided signal to the underlying process
+//
+// Unable to kill all processes in the task using this method on a process
+func (p *Process) Kill(ctx context.Context, signal uint32, _ bool) error {
+	_, err := p.t.shim.Kill(ctx, &shim.KillRequest{
+		Signal: signal,
+		ID:     p.id,
+	})
+	if err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	return err
+}
+
+// State of process
+func (p *Process) State(ctx context.Context) (runtime.State, error) {
+	// use the container status for the status of the process
+	response, err := p.t.shim.State(ctx, &shim.StateRequest{
+		ID: p.id,
+	})
+	if err != nil {
+		return runtime.State{}, errdefs.FromGRPC(err)
+	}
+	var status runtime.Status
+	switch response.Status {
+	case task.StatusCreated:
+		status = runtime.CreatedStatus
+	case task.StatusRunning:
+		status = runtime.RunningStatus
+	case task.StatusStopped:
+		status = runtime.StoppedStatus
+	case task.StatusPaused:
+		status = runtime.PausedStatus
+	case task.StatusPausing:
+		status = runtime.PausingStatus
+	}
+	return runtime.State{
+		Pid:        response.Pid,
+		Status:     status,
+		Stdin:      response.Stdin,
+		Stdout:     response.Stdout,
+		Stderr:     response.Stderr,
+		Terminal:   response.Terminal,
+		ExitStatus: response.ExitStatus,
+	}, nil
+}
+
+// ResizePty changes the size of the process's PTY to the provided width and height
+func (p *Process) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
+	_, err := p.t.shim.ResizePty(ctx, &shim.ResizePtyRequest{
+		ID:     p.id,
+		Width:  size.Width,
+		Height: size.Height,
+	})
+	if err != nil {
+		err = errdefs.FromGRPC(err)
+	}
+	return err
+}
+
+// CloseIO closes the provided IO pipe for the process
+func (p *Process) CloseIO(ctx context.Context) error {
+	_, err := p.t.shim.CloseIO(ctx, &shim.CloseIORequest{
+		ID:    p.id,
+		Stdin: true,
+	})
+	if err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	return nil
+}
+
+// Start the process
+func (p *Process) Start(ctx context.Context) error {
+	_, err := p.t.shim.Start(ctx, &shim.StartRequest{
+		ID: p.id,
+	})
+	if err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	return nil
+}
+
+// Wait on the process to exit and return the exit status and timestamp
+func (p *Process) Wait(ctx context.Context) (*runtime.Exit, error) {
+	r, err := p.t.shim.Wait(ctx, &shim.WaitRequest{
+		ID: p.id,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &runtime.Exit{
+		Timestamp: r.ExitedAt,
+		Status:    r.ExitStatus,
+	}, nil
+}
diff --git a/vendor/github.com/containerd/containerd/linux/runcopts/runc.pb.go b/vendor/github.com/containerd/containerd/linux/runcopts/runc.pb.go
new file mode 100644
index 0000000..0415e23
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/runcopts/runc.pb.go
@@ -0,0 +1,1332 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/linux/runcopts/runc.proto
+// DO NOT EDIT!
+
+/*
+	Package runcopts is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/linux/runcopts/runc.proto
+
+	It has these top-level messages:
+		RuncOptions
+		CreateOptions
+		CheckpointOptions
+*/
+package runcopts
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type RuncOptions struct {
+	Runtime       string `protobuf:"bytes,1,opt,name=runtime,proto3" json:"runtime,omitempty"`
+	RuntimeRoot   string `protobuf:"bytes,2,opt,name=runtime_root,json=runtimeRoot,proto3" json:"runtime_root,omitempty"`
+	CriuPath      string `protobuf:"bytes,3,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"`
+	SystemdCgroup bool   `protobuf:"varint,4,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"`
+}
+
+func (m *RuncOptions) Reset()                    { *m = RuncOptions{} }
+func (*RuncOptions) ProtoMessage()               {}
+func (*RuncOptions) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{0} }
+
+type CreateOptions struct {
+	NoPivotRoot         bool     `protobuf:"varint,1,opt,name=no_pivot_root,json=noPivotRoot,proto3" json:"no_pivot_root,omitempty"`
+	OpenTcp             bool     `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"`
+	ExternalUnixSockets bool     `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"`
+	Terminal            bool     `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	FileLocks           bool     `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
+	EmptyNamespaces     []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces" json:"empty_namespaces,omitempty"`
+	CgroupsMode         string   `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
+	NoNewKeyring        bool     `protobuf:"varint,8,opt,name=no_new_keyring,json=noNewKeyring,proto3" json:"no_new_keyring,omitempty"`
+	ShimCgroup          string   `protobuf:"bytes,9,opt,name=shim_cgroup,json=shimCgroup,proto3" json:"shim_cgroup,omitempty"`
+	IoUid               uint32   `protobuf:"varint,10,opt,name=io_uid,json=ioUid,proto3" json:"io_uid,omitempty"`
+	IoGid               uint32   `protobuf:"varint,11,opt,name=io_gid,json=ioGid,proto3" json:"io_gid,omitempty"`
+}
+
+func (m *CreateOptions) Reset()                    { *m = CreateOptions{} }
+func (*CreateOptions) ProtoMessage()               {}
+func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{1} }
+
+type CheckpointOptions struct {
+	Exit                bool     `protobuf:"varint,1,opt,name=exit,proto3" json:"exit,omitempty"`
+	OpenTcp             bool     `protobuf:"varint,2,opt,name=open_tcp,json=openTcp,proto3" json:"open_tcp,omitempty"`
+	ExternalUnixSockets bool     `protobuf:"varint,3,opt,name=external_unix_sockets,json=externalUnixSockets,proto3" json:"external_unix_sockets,omitempty"`
+	Terminal            bool     `protobuf:"varint,4,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	FileLocks           bool     `protobuf:"varint,5,opt,name=file_locks,json=fileLocks,proto3" json:"file_locks,omitempty"`
+	EmptyNamespaces     []string `protobuf:"bytes,6,rep,name=empty_namespaces,json=emptyNamespaces" json:"empty_namespaces,omitempty"`
+	CgroupsMode         string   `protobuf:"bytes,7,opt,name=cgroups_mode,json=cgroupsMode,proto3" json:"cgroups_mode,omitempty"`
+}
+
+func (m *CheckpointOptions) Reset()                    { *m = CheckpointOptions{} }
+func (*CheckpointOptions) ProtoMessage()               {}
+func (*CheckpointOptions) Descriptor() ([]byte, []int) { return fileDescriptorRunc, []int{2} }
+
+func init() {
+	proto.RegisterType((*RuncOptions)(nil), "containerd.linux.runc.RuncOptions")
+	proto.RegisterType((*CreateOptions)(nil), "containerd.linux.runc.CreateOptions")
+	proto.RegisterType((*CheckpointOptions)(nil), "containerd.linux.runc.CheckpointOptions")
+}
+func (m *RuncOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RuncOptions) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Runtime) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(len(m.Runtime)))
+		i += copy(dAtA[i:], m.Runtime)
+	}
+	if len(m.RuntimeRoot) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(len(m.RuntimeRoot)))
+		i += copy(dAtA[i:], m.RuntimeRoot)
+	}
+	if len(m.CriuPath) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuPath)))
+		i += copy(dAtA[i:], m.CriuPath)
+	}
+	if m.SystemdCgroup {
+		dAtA[i] = 0x20
+		i++
+		if m.SystemdCgroup {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	return i, nil
+}
+
+func (m *CreateOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.NoPivotRoot {
+		dAtA[i] = 0x8
+		i++
+		if m.NoPivotRoot {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.OpenTcp {
+		dAtA[i] = 0x10
+		i++
+		if m.OpenTcp {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.ExternalUnixSockets {
+		dAtA[i] = 0x18
+		i++
+		if m.ExternalUnixSockets {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.Terminal {
+		dAtA[i] = 0x20
+		i++
+		if m.Terminal {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.FileLocks {
+		dAtA[i] = 0x28
+		i++
+		if m.FileLocks {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.EmptyNamespaces) > 0 {
+		for _, s := range m.EmptyNamespaces {
+			dAtA[i] = 0x32
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.CgroupsMode) > 0 {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(len(m.CgroupsMode)))
+		i += copy(dAtA[i:], m.CgroupsMode)
+	}
+	if m.NoNewKeyring {
+		dAtA[i] = 0x40
+		i++
+		if m.NoNewKeyring {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.ShimCgroup) > 0 {
+		dAtA[i] = 0x4a
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(len(m.ShimCgroup)))
+		i += copy(dAtA[i:], m.ShimCgroup)
+	}
+	if m.IoUid != 0 {
+		dAtA[i] = 0x50
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(m.IoUid))
+	}
+	if m.IoGid != 0 {
+		dAtA[i] = 0x58
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(m.IoGid))
+	}
+	return i, nil
+}
+
+func (m *CheckpointOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CheckpointOptions) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Exit {
+		dAtA[i] = 0x8
+		i++
+		if m.Exit {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.OpenTcp {
+		dAtA[i] = 0x10
+		i++
+		if m.OpenTcp {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.ExternalUnixSockets {
+		dAtA[i] = 0x18
+		i++
+		if m.ExternalUnixSockets {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.Terminal {
+		dAtA[i] = 0x20
+		i++
+		if m.Terminal {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.FileLocks {
+		dAtA[i] = 0x28
+		i++
+		if m.FileLocks {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.EmptyNamespaces) > 0 {
+		for _, s := range m.EmptyNamespaces {
+			dAtA[i] = 0x32
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.CgroupsMode) > 0 {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintRunc(dAtA, i, uint64(len(m.CgroupsMode)))
+		i += copy(dAtA[i:], m.CgroupsMode)
+	}
+	return i, nil
+}
+
+func encodeFixed64Runc(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Runc(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintRunc(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *RuncOptions) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Runtime)
+	if l > 0 {
+		n += 1 + l + sovRunc(uint64(l))
+	}
+	l = len(m.RuntimeRoot)
+	if l > 0 {
+		n += 1 + l + sovRunc(uint64(l))
+	}
+	l = len(m.CriuPath)
+	if l > 0 {
+		n += 1 + l + sovRunc(uint64(l))
+	}
+	if m.SystemdCgroup {
+		n += 2
+	}
+	return n
+}
+
+func (m *CreateOptions) Size() (n int) {
+	var l int
+	_ = l
+	if m.NoPivotRoot {
+		n += 2
+	}
+	if m.OpenTcp {
+		n += 2
+	}
+	if m.ExternalUnixSockets {
+		n += 2
+	}
+	if m.Terminal {
+		n += 2
+	}
+	if m.FileLocks {
+		n += 2
+	}
+	if len(m.EmptyNamespaces) > 0 {
+		for _, s := range m.EmptyNamespaces {
+			l = len(s)
+			n += 1 + l + sovRunc(uint64(l))
+		}
+	}
+	l = len(m.CgroupsMode)
+	if l > 0 {
+		n += 1 + l + sovRunc(uint64(l))
+	}
+	if m.NoNewKeyring {
+		n += 2
+	}
+	l = len(m.ShimCgroup)
+	if l > 0 {
+		n += 1 + l + sovRunc(uint64(l))
+	}
+	if m.IoUid != 0 {
+		n += 1 + sovRunc(uint64(m.IoUid))
+	}
+	if m.IoGid != 0 {
+		n += 1 + sovRunc(uint64(m.IoGid))
+	}
+	return n
+}
+
+func (m *CheckpointOptions) Size() (n int) {
+	var l int
+	_ = l
+	if m.Exit {
+		n += 2
+	}
+	if m.OpenTcp {
+		n += 2
+	}
+	if m.ExternalUnixSockets {
+		n += 2
+	}
+	if m.Terminal {
+		n += 2
+	}
+	if m.FileLocks {
+		n += 2
+	}
+	if len(m.EmptyNamespaces) > 0 {
+		for _, s := range m.EmptyNamespaces {
+			l = len(s)
+			n += 1 + l + sovRunc(uint64(l))
+		}
+	}
+	l = len(m.CgroupsMode)
+	if l > 0 {
+		n += 1 + l + sovRunc(uint64(l))
+	}
+	return n
+}
+
+func sovRunc(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozRunc(x uint64) (n int) {
+	return sovRunc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *RuncOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&RuncOptions{`,
+		`Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`,
+		`RuntimeRoot:` + fmt.Sprintf("%v", this.RuntimeRoot) + `,`,
+		`CriuPath:` + fmt.Sprintf("%v", this.CriuPath) + `,`,
+		`SystemdCgroup:` + fmt.Sprintf("%v", this.SystemdCgroup) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CreateOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateOptions{`,
+		`NoPivotRoot:` + fmt.Sprintf("%v", this.NoPivotRoot) + `,`,
+		`OpenTcp:` + fmt.Sprintf("%v", this.OpenTcp) + `,`,
+		`ExternalUnixSockets:` + fmt.Sprintf("%v", this.ExternalUnixSockets) + `,`,
+		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
+		`FileLocks:` + fmt.Sprintf("%v", this.FileLocks) + `,`,
+		`EmptyNamespaces:` + fmt.Sprintf("%v", this.EmptyNamespaces) + `,`,
+		`CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`,
+		`NoNewKeyring:` + fmt.Sprintf("%v", this.NoNewKeyring) + `,`,
+		`ShimCgroup:` + fmt.Sprintf("%v", this.ShimCgroup) + `,`,
+		`IoUid:` + fmt.Sprintf("%v", this.IoUid) + `,`,
+		`IoGid:` + fmt.Sprintf("%v", this.IoGid) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CheckpointOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CheckpointOptions{`,
+		`Exit:` + fmt.Sprintf("%v", this.Exit) + `,`,
+		`OpenTcp:` + fmt.Sprintf("%v", this.OpenTcp) + `,`,
+		`ExternalUnixSockets:` + fmt.Sprintf("%v", this.ExternalUnixSockets) + `,`,
+		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
+		`FileLocks:` + fmt.Sprintf("%v", this.FileLocks) + `,`,
+		`EmptyNamespaces:` + fmt.Sprintf("%v", this.EmptyNamespaces) + `,`,
+		`CgroupsMode:` + fmt.Sprintf("%v", this.CgroupsMode) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringRunc(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *RuncOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRunc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RuncOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RuncOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Runtime = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RuntimeRoot", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RuntimeRoot = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CriuPath", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CriuPath = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SystemdCgroup", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.SystemdCgroup = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRunc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRunc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CreateOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRunc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CreateOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CreateOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NoPivotRoot", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.NoPivotRoot = bool(v != 0)
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OpenTcp", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.OpenTcp = bool(v != 0)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExternalUnixSockets", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ExternalUnixSockets = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Terminal = bool(v != 0)
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FileLocks", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.FileLocks = bool(v != 0)
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EmptyNamespaces", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.EmptyNamespaces = append(m.EmptyNamespaces, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CgroupsMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CgroupsMode = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NoNewKeyring", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.NoNewKeyring = bool(v != 0)
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ShimCgroup", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ShimCgroup = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IoUid", wireType)
+			}
+			m.IoUid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.IoUid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IoGid", wireType)
+			}
+			m.IoGid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.IoGid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRunc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRunc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CheckpointOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRunc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CheckpointOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CheckpointOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Exit", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Exit = bool(v != 0)
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field OpenTcp", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.OpenTcp = bool(v != 0)
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExternalUnixSockets", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ExternalUnixSockets = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Terminal = bool(v != 0)
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FileLocks", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.FileLocks = bool(v != 0)
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field EmptyNamespaces", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.EmptyNamespaces = append(m.EmptyNamespaces, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CgroupsMode", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRunc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CgroupsMode = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRunc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRunc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipRunc(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRunc
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRunc
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthRunc
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowRunc
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipRunc(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthRunc = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRunc   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/linux/runcopts/runc.proto", fileDescriptorRunc)
+}
+
+var fileDescriptorRunc = []byte{
+	// 495 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x93, 0x31, 0x6f, 0xd3, 0x40,
+	0x14, 0xc7, 0x7b, 0xb4, 0x4d, 0x9c, 0x4b, 0x53, 0xe0, 0x20, 0xd2, 0x51, 0x84, 0x09, 0x11, 0x48,
+	0x61, 0x49, 0x24, 0x58, 0x10, 0x6c, 0x64, 0x60, 0x00, 0x4a, 0x65, 0xe8, 0xc2, 0x72, 0x72, 0xcf,
+	0x87, 0xf3, 0x94, 0xf8, 0xde, 0xe9, 0xee, 0x4c, 0x93, 0xad, 0x9f, 0x80, 0xcf, 0xd5, 0x91, 0x91,
+	0x91, 0xe6, 0x8b, 0x80, 0x7c, 0xb6, 0x0b, 0x2b, 0x2b, 0xdb, 0xff, 0xfd, 0xfe, 0xcf, 0x7e, 0x4f,
+	0xff, 0xd3, 0xa3, 0x2f, 0x73, 0xf0, 0x8b, 0xf2, 0x6c, 0x2a, 0xb1, 0x98, 0x49, 0xd4, 0x3e, 0x05,
+	0xad, 0x6c, 0xf6, 0xb7, 0x5c, 0x81, 0x2e, 0xd7, 0x33, 0x5b, 0x6a, 0x89, 0xc6, 0xbb, 0x20, 0xa6,
+	0xc6, 0xa2, 0x47, 0x36, 0xfc, 0xd3, 0x35, 0x0d, 0x5d, 0xd3, 0xca, 0x3c, 0xba, 0x9b, 0x63, 0x8e,
+	0xa1, 0x63, 0x56, 0xa9, 0xba, 0x79, 0xfc, 0x8d, 0xd0, 0x7e, 0x52, 0x6a, 0xf9, 0xc1, 0x78, 0x40,
+	0xed, 0x18, 0xa7, 0x5d, 0x5b, 0x6a, 0x0f, 0x85, 0xe2, 0x64, 0x44, 0x26, 0xbd, 0xa4, 0x2d, 0xd9,
+	0x23, 0x7a, 0xd0, 0x48, 0x61, 0x11, 0x3d, 0xbf, 0x11, 0xec, 0x7e, 0xc3, 0x12, 0x44, 0xcf, 0xee,
+	0xd3, 0x9e, 0xb4, 0x50, 0x0a, 0x93, 0xfa, 0x05, 0xdf, 0x0d, 0x7e, 0x54, 0x81, 0x93, 0xd4, 0x2f,
+	0xd8, 0x13, 0x7a, 0xe8, 0x36, 0xce, 0xab, 0x22, 0x13, 0x32, 0xb7, 0x58, 0x1a, 0xbe, 0x37, 0x22,
+	0x93, 0x28, 0x19, 0x34, 0x74, 0x1e, 0xe0, 0xf8, 0x62, 0x97, 0x0e, 0xe6, 0x56, 0xa5, 0x5e, 0xb5,
+	0x2b, 0x8d, 0xe9, 0x40, 0xa3, 0x30, 0xf0, 0x15, 0x7d, 0x3d, 0x99, 0x84, 0xef, 0xfa, 0x1a, 0x4f,
+	0x2a, 0x16, 0x26, 0xdf, 0xa3, 0x11, 0x1a, 0xa5, 0x85, 0x97, 0x26, 0x2c, 0x16, 0x25, 0xdd, 0xaa,
+	0xfe, 0x24, 0x0d, 0x7b, 0x46, 0x87, 0x6a, 0xed, 0x95, 0xd5, 0xe9, 0x4a, 0x94, 0x1a, 0xd6, 0xc2,
+	0xa1, 0x5c, 0x2a, 0xef, 0xc2, 0x82, 0x51, 0x72, 0xa7, 0x35, 0x4f, 0x35, 0xac, 0x3f, 0xd6, 0x16,
+	0x3b, 0xa2, 0x91, 0x57, 0xb6, 0x00, 0x9d, 0xae, 0x9a, 0x2d, 0xaf, 0x6b, 0xf6, 0x80, 0xd2, 0x2f,
+	0xb0, 0x52, 0x62, 0x85, 0x72, 0xe9, 0xf8, 0x7e, 0x70, 0x7b, 0x15, 0x79, 0x57, 0x01, 0xf6, 0x94,
+	0xde, 0x52, 0x85, 0xf1, 0x1b, 0xa1, 0xd3, 0x42, 0x39, 0x93, 0x4a, 0xe5, 0x78, 0x67, 0xb4, 0x3b,
+	0xe9, 0x25, 0x37, 0x03, 0x3f, 0xbe, 0xc6, 0x55, 0xa2, 0x75, 0x12, 0x4e, 0x14, 0x98, 0x29, 0xde,
+	0xad, 0x13, 0x6d, 0xd8, 0x7b, 0xcc, 0x14, 0x7b, 0x4c, 0x0f, 0x35, 0x0a, 0xad, 0xce, 0xc5, 0x52,
+	0x6d, 0x2c, 0xe8, 0x9c, 0x47, 0x61, 0xe0, 0x81, 0xc6, 0x63, 0x75, 0xfe, 0xb6, 0x66, 0xec, 0x21,
+	0xed, 0xbb, 0x05, 0x14, 0x6d, 0xae, 0xbd, 0xf0, 0x1f, 0x5a, 0xa1, 0x3a, 0x54, 0x36, 0xa4, 0x1d,
+	0x40, 0x51, 0x42, 0xc6, 0xe9, 0x88, 0x4c, 0x06, 0xc9, 0x3e, 0xe0, 0x29, 0x64, 0x0d, 0xce, 0x21,
+	0xe3, 0xfd, 0x16, 0xbf, 0x81, 0x6c, 0xfc, 0x8b, 0xd0, 0xdb, 0xf3, 0x85, 0x92, 0x4b, 0x83, 0xa0,
+	0x7d, 0xfb, 0x0c, 0x8c, 0xee, 0xa9, 0x35, 0xb4, 0xe9, 0x07, 0xfd, 0xbf, 0xc6, 0xfe, 0x3a, 0xb9,
+	0xbc, 0x8a, 0x77, 0x7e, 0x5c, 0xc5, 0x3b, 0x17, 0xdb, 0x98, 0x5c, 0x6e, 0x63, 0xf2, 0x7d, 0x1b,
+	0x93, 0x9f, 0xdb, 0x98, 0x7c, 0x7e, 0xf1, 0x8f, 0x87, 0xf9, 0xaa, 0x15, 0x67, 0x9d, 0x70, 0x70,
+	0xcf, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0xbe, 0xbb, 0xf0, 0x6c, 0xdb, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/linux/runcopts/runc.proto b/vendor/github.com/containerd/containerd/linux/runcopts/runc.proto
new file mode 100644
index 0000000..3d10dc9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/runcopts/runc.proto
@@ -0,0 +1,38 @@
+syntax = "proto3";
+
+package containerd.linux.runc;
+
+import "gogoproto/gogo.proto";
+
+option go_package = "github.com/containerd/containerd/linux/runcopts;runcopts";
+
+message RuncOptions {
+	string runtime = 1;
+	string runtime_root = 2;
+	string criu_path = 3;
+	bool systemd_cgroup = 4;
+}
+
+message CreateOptions {
+	bool no_pivot_root = 1;
+	bool open_tcp = 2;
+	bool external_unix_sockets = 3;
+	bool terminal = 4;
+	bool file_locks = 5;
+	repeated string empty_namespaces = 6;
+	string cgroups_mode = 7;
+	bool no_new_keyring = 8;
+	string shim_cgroup = 9;
+	uint32 io_uid = 10;
+	uint32 io_gid = 11;
+}
+
+message CheckpointOptions {
+	bool exit = 1;
+	bool open_tcp = 2;
+	bool external_unix_sockets = 3;
+	bool terminal = 4;
+	bool file_locks = 5;
+	repeated string empty_namespaces = 6;
+	string cgroups_mode = 7;
+}
diff --git a/vendor/github.com/containerd/containerd/linux/runtime.go b/vendor/github.com/containerd/containerd/linux/runtime.go
new file mode 100644
index 0000000..26d001f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/runtime.go
@@ -0,0 +1,521 @@
+// +build linux
+
+package linux
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/boltdb/bolt"
+	eventsapi "github.com/containerd/containerd/api/services/events/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/events"
+	"github.com/containerd/containerd/identifiers"
+	"github.com/containerd/containerd/linux/runcopts"
+	client "github.com/containerd/containerd/linux/shim"
+	shim "github.com/containerd/containerd/linux/shim/v1"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/metadata"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/containerd/plugin"
+	"github.com/containerd/containerd/reaper"
+	"github.com/containerd/containerd/runtime"
+	"github.com/containerd/containerd/sys"
+	runc "github.com/containerd/go-runc"
+	"github.com/containerd/typeurl"
+	google_protobuf "github.com/golang/protobuf/ptypes/empty"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sys/unix"
+)
+
+var (
+	pluginID = fmt.Sprintf("%s.%s", plugin.RuntimePlugin, "linux")
+	empty    = &google_protobuf.Empty{}
+)
+
+const (
+	configFilename = "config.json"
+	defaultRuntime = "runc"
+	defaultShim    = "containerd-shim"
+)
+
+func init() {
+	plugin.Register(&plugin.Registration{
+		Type:   plugin.RuntimePlugin,
+		ID:     "linux",
+		InitFn: New,
+		Requires: []plugin.Type{
+			plugin.TaskMonitorPlugin,
+			plugin.MetadataPlugin,
+		},
+		Config: &Config{
+			Shim:    defaultShim,
+			Runtime: defaultRuntime,
+		},
+	})
+}
+
+var _ = (runtime.Runtime)(&Runtime{})
+
+// Config options for the runtime
+type Config struct {
+	// Shim is a path or name of binary implementing the Shim GRPC API
+	Shim string `toml:"shim"`
+	// Runtime is a path or name of an OCI runtime used by the shim
+	Runtime string `toml:"runtime"`
+	// RuntimeRoot is the path that shall be used by the OCI runtime for its data
+	RuntimeRoot string `toml:"runtime_root"`
+	// NoShim calls runc directly from within the pkg
+	NoShim bool `toml:"no_shim"`
+	// Debug enable debug on the shim
+	ShimDebug bool `toml:"shim_debug"`
+	// ShimNoMountNS prevents the runtime from putting shims into their own mount namespace.
+	//
+	// Putting the shim in its own mount namespace ensure that any mounts made
+	// by it in order to get the task rootfs ready will be undone regardless
+	// on how the shim dies.
+	//
+	// NOTE: This should only be used in kernel older than 3.18 to avoid shims
+	// from causing a DoS in their parent namespace due to having a copy of
+	// mounts previously there which would prevent unlink, rename and remove
+	// operations on those mountpoints.
+	ShimNoMountNS bool `toml:"shim_no_newns"`
+}
+
+// New returns a configured runtime
+func New(ic *plugin.InitContext) (interface{}, error) {
+	ic.Meta.Platforms = []ocispec.Platform{platforms.DefaultSpec()}
+
+	if err := os.MkdirAll(ic.Root, 0711); err != nil {
+		return nil, err
+	}
+	if err := os.MkdirAll(ic.State, 0711); err != nil {
+		return nil, err
+	}
+	monitor, err := ic.Get(plugin.TaskMonitorPlugin)
+	if err != nil {
+		return nil, err
+	}
+	m, err := ic.Get(plugin.MetadataPlugin)
+	if err != nil {
+		return nil, err
+	}
+	cfg := ic.Config.(*Config)
+	r := &Runtime{
+		root:    ic.Root,
+		state:   ic.State,
+		monitor: monitor.(runtime.TaskMonitor),
+		tasks:   runtime.NewTaskList(),
+		db:      m.(*metadata.DB),
+		address: ic.Address,
+		events:  ic.Events,
+		config:  cfg,
+	}
+	tasks, err := r.restoreTasks(ic.Context)
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: need to add the tasks to the monitor
+	for _, t := range tasks {
+		if err := r.tasks.AddWithNamespace(t.namespace, t); err != nil {
+			return nil, err
+		}
+	}
+	return r, nil
+}
+
+// Runtime for a linux based system
+type Runtime struct {
+	root    string
+	state   string
+	address string
+
+	monitor runtime.TaskMonitor
+	tasks   *runtime.TaskList
+	db      *metadata.DB
+	events  *events.Exchange
+
+	config *Config
+}
+
+// ID of the runtime
+func (r *Runtime) ID() string {
+	return pluginID
+}
+
+// Create a new task
+func (r *Runtime) Create(ctx context.Context, id string, opts runtime.CreateOpts) (_ runtime.Task, err error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := identifiers.Validate(id); err != nil {
+		return nil, errors.Wrapf(err, "invalid task id")
+	}
+
+	ropts, err := r.getRuncOptions(ctx, id)
+	if err != nil {
+		return nil, err
+	}
+
+	ec := reaper.Default.Subscribe()
+	defer reaper.Default.Unsubscribe(ec)
+
+	bundle, err := newBundle(id,
+		filepath.Join(r.state, namespace),
+		filepath.Join(r.root, namespace),
+		opts.Spec.Value)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err != nil {
+			bundle.Delete()
+		}
+	}()
+
+	shimopt := ShimLocal(r.events)
+	if !r.config.NoShim {
+		var cgroup string
+		if opts.Options != nil {
+			v, err := typeurl.UnmarshalAny(opts.Options)
+			if err != nil {
+				return nil, err
+			}
+			cgroup = v.(*runcopts.CreateOptions).ShimCgroup
+		}
+		exitHandler := func() {
+			log.G(ctx).WithField("id", id).Info("shim reaped")
+			t, err := r.tasks.Get(ctx, id)
+			if err != nil {
+				// Task was never started or was already successfully deleted
+				return
+			}
+			lc := t.(*Task)
+
+			// Stop the monitor
+			if err := r.monitor.Stop(lc); err != nil {
+				log.G(ctx).WithError(err).WithFields(logrus.Fields{
+					"id":        id,
+					"namespace": namespace,
+				}).Warn("failed to stop monitor")
+			}
+
+			log.G(ctx).WithFields(logrus.Fields{
+				"id":        id,
+				"namespace": namespace,
+			}).Warn("cleaning up after killed shim")
+			err = r.cleanupAfterDeadShim(context.Background(), bundle, namespace, id, lc.pid, ec)
+			if err == nil {
+				r.tasks.Delete(ctx, lc)
+			} else {
+				log.G(ctx).WithError(err).WithFields(logrus.Fields{
+					"id":        id,
+					"namespace": namespace,
+				}).Warn("failed to clean up after killed shim")
+			}
+		}
+		shimopt = ShimRemote(r.config.Shim, r.address, cgroup,
+			r.config.ShimNoMountNS, r.config.ShimDebug, exitHandler)
+	}
+
+	s, err := bundle.NewShimClient(ctx, namespace, shimopt, ropts)
+	if err != nil {
+		return nil, err
+	}
+	defer func() {
+		if err != nil {
+			if kerr := s.KillShim(ctx); kerr != nil {
+				log.G(ctx).WithError(kerr).Error("failed to kill shim")
+			}
+		}
+	}()
+
+	runtime := r.config.Runtime
+	if ropts != nil && ropts.Runtime != "" {
+		runtime = ropts.Runtime
+	}
+	sopts := &shim.CreateTaskRequest{
+		ID:         id,
+		Bundle:     bundle.path,
+		Runtime:    runtime,
+		Stdin:      opts.IO.Stdin,
+		Stdout:     opts.IO.Stdout,
+		Stderr:     opts.IO.Stderr,
+		Terminal:   opts.IO.Terminal,
+		Checkpoint: opts.Checkpoint,
+		Options:    opts.Options,
+	}
+	for _, m := range opts.Rootfs {
+		sopts.Rootfs = append(sopts.Rootfs, &types.Mount{
+			Type:    m.Type,
+			Source:  m.Source,
+			Options: m.Options,
+		})
+	}
+	cr, err := s.Create(ctx, sopts)
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	t, err := newTask(id, namespace, int(cr.Pid), s, r.monitor)
+	if err != nil {
+		return nil, err
+	}
+	if err := r.tasks.Add(ctx, t); err != nil {
+		return nil, err
+	}
+	// after the task is created, add it to the monitor if it has a cgroup
+	// this can be different on a checkpoint/restore
+	if t.cg != nil {
+		if err = r.monitor.Monitor(t); err != nil {
+			if _, err := r.Delete(ctx, t); err != nil {
+				log.G(ctx).WithError(err).Error("deleting task after failed monitor")
+			}
+			return nil, err
+		}
+	}
+	return t, nil
+}
+
+// Delete a task removing all on disk state
+func (r *Runtime) Delete(ctx context.Context, c runtime.Task) (*runtime.Exit, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+	lc, ok := c.(*Task)
+	if !ok {
+		return nil, fmt.Errorf("task cannot be cast as *linux.Task")
+	}
+	if err := r.monitor.Stop(lc); err != nil {
+		return nil, err
+	}
+	bundle := loadBundle(
+		lc.id,
+		filepath.Join(r.state, namespace, lc.id),
+		filepath.Join(r.root, namespace, lc.id),
+	)
+
+	rsp, err := lc.shim.Delete(ctx, empty)
+	if err != nil {
+		if cerr := r.cleanupAfterDeadShim(ctx, bundle, namespace, c.ID(), lc.pid, nil); cerr != nil {
+			log.G(ctx).WithError(err).Error("unable to cleanup task")
+		}
+		return nil, errdefs.FromGRPC(err)
+	}
+	r.tasks.Delete(ctx, lc)
+	if err := lc.shim.KillShim(ctx); err != nil {
+		log.G(ctx).WithError(err).Error("failed to kill shim")
+	}
+
+	if err := bundle.Delete(); err != nil {
+		log.G(ctx).WithError(err).Error("failed to delete bundle")
+	}
+	return &runtime.Exit{
+		Status:    rsp.ExitStatus,
+		Timestamp: rsp.ExitedAt,
+		Pid:       rsp.Pid,
+	}, nil
+}
+
+// Tasks returns all tasks known to the runtime
+func (r *Runtime) Tasks(ctx context.Context) ([]runtime.Task, error) {
+	return r.tasks.GetAll(ctx)
+}
+
+func (r *Runtime) restoreTasks(ctx context.Context) ([]*Task, error) {
+	dir, err := ioutil.ReadDir(r.state)
+	if err != nil {
+		return nil, err
+	}
+	var o []*Task
+	for _, namespace := range dir {
+		if !namespace.IsDir() {
+			continue
+		}
+		name := namespace.Name()
+		log.G(ctx).WithField("namespace", name).Debug("loading tasks in namespace")
+		tasks, err := r.loadTasks(ctx, name)
+		if err != nil {
+			return nil, err
+		}
+		o = append(o, tasks...)
+	}
+	return o, nil
+}
+
+// Get a specific task by task id
+func (r *Runtime) Get(ctx context.Context, id string) (runtime.Task, error) {
+	return r.tasks.Get(ctx, id)
+}
+
+func (r *Runtime) loadTasks(ctx context.Context, ns string) ([]*Task, error) {
+	dir, err := ioutil.ReadDir(filepath.Join(r.state, ns))
+	if err != nil {
+		return nil, err
+	}
+	var o []*Task
+	for _, path := range dir {
+		if !path.IsDir() {
+			continue
+		}
+		id := path.Name()
+		bundle := loadBundle(
+			id,
+			filepath.Join(r.state, ns, id),
+			filepath.Join(r.root, ns, id),
+		)
+		pid, _ := runc.ReadPidFile(filepath.Join(bundle.path, client.InitPidFile))
+		s, err := bundle.NewShimClient(ctx, ns, ShimConnect(), nil)
+		if err != nil {
+			log.G(ctx).WithError(err).WithFields(logrus.Fields{
+				"id":        id,
+				"namespace": ns,
+			}).Error("connecting to shim")
+			err := r.cleanupAfterDeadShim(ctx, bundle, ns, id, pid, nil)
+			if err != nil {
+				log.G(ctx).WithError(err).WithField("bundle", bundle.path).
+					Error("cleaning up after dead shim")
+			}
+			continue
+		}
+
+		t, err := newTask(id, ns, pid, s, r.monitor)
+		if err != nil {
+			log.G(ctx).WithError(err).Error("loading task type")
+			continue
+		}
+		o = append(o, t)
+	}
+	return o, nil
+}
+
+func (r *Runtime) cleanupAfterDeadShim(ctx context.Context, bundle *bundle, ns, id string, pid int, ec chan runc.Exit) error {
+	ctx = namespaces.WithNamespace(ctx, ns)
+	if err := r.terminate(ctx, bundle, ns, id); err != nil {
+		if r.config.ShimDebug {
+			return errors.Wrap(err, "failed to terminate task, leaving bundle for debugging")
+		}
+		log.G(ctx).WithError(err).Warn("failed to terminate task")
+	}
+
+	if ec != nil {
+		// if sub-reaper is set, reap our new child
+		if v, err := sys.GetSubreaper(); err == nil && v == 1 {
+			for e := range ec {
+				if e.Pid == pid {
+					break
+				}
+			}
+		}
+	}
+
+	// Notify Client
+	exitedAt := time.Now().UTC()
+	r.events.Publish(ctx, runtime.TaskExitEventTopic, &eventsapi.TaskExit{
+		ContainerID: id,
+		ID:          id,
+		Pid:         uint32(pid),
+		ExitStatus:  128 + uint32(unix.SIGKILL),
+		ExitedAt:    exitedAt,
+	})
+
+	if err := bundle.Delete(); err != nil {
+		log.G(ctx).WithError(err).Error("delete bundle")
+	}
+
+	r.events.Publish(ctx, runtime.TaskDeleteEventTopic, &eventsapi.TaskDelete{
+		ContainerID: id,
+		Pid:         uint32(pid),
+		ExitStatus:  128 + uint32(unix.SIGKILL),
+		ExitedAt:    exitedAt,
+	})
+
+	return nil
+}
+
+func (r *Runtime) terminate(ctx context.Context, bundle *bundle, ns, id string) error {
+	ctx = namespaces.WithNamespace(ctx, ns)
+	rt, err := r.getRuntime(ctx, ns, id)
+	if err != nil {
+		return err
+	}
+
+	if err := rt.Delete(ctx, id, &runc.DeleteOpts{
+		Force: true,
+	}); err != nil {
+		log.G(ctx).WithError(err).Warnf("delete runtime state %s", id)
+	}
+	if err := unix.Unmount(filepath.Join(bundle.path, "rootfs"), 0); err != nil {
+		log.G(ctx).WithError(err).WithFields(logrus.Fields{
+			"path": bundle.path,
+			"id":   id,
+		}).Warnf("unmount task rootfs")
+	}
+	return nil
+}
+
+func (r *Runtime) getRuntime(ctx context.Context, ns, id string) (*runc.Runc, error) {
+	ropts, err := r.getRuncOptions(ctx, id)
+	if err != nil {
+		return nil, err
+	}
+
+	var (
+		cmd  = r.config.Runtime
+		root = client.RuncRoot
+	)
+	if ropts != nil {
+		if ropts.Runtime != "" {
+			cmd = ropts.Runtime
+		}
+		if ropts.RuntimeRoot != "" {
+			root = ropts.RuntimeRoot
+		}
+	}
+
+	return &runc.Runc{
+		Command:      cmd,
+		LogFormat:    runc.JSON,
+		PdeathSignal: unix.SIGKILL,
+		Root:         filepath.Join(root, ns),
+	}, nil
+}
+
+func (r *Runtime) getRuncOptions(ctx context.Context, id string) (*runcopts.RuncOptions, error) {
+	var container containers.Container
+
+	if err := r.db.View(func(tx *bolt.Tx) error {
+		store := metadata.NewContainerStore(tx)
+		var err error
+		container, err = store.Get(ctx, id)
+		return err
+	}); err != nil {
+		return nil, err
+	}
+
+	if container.Runtime.Options != nil {
+		v, err := typeurl.UnmarshalAny(container.Runtime.Options)
+		if err != nil {
+			return nil, err
+		}
+		ropts, ok := v.(*runcopts.RuncOptions)
+		if !ok {
+			return nil, errors.New("invalid runtime options format")
+		}
+
+		return ropts, nil
+	}
+	return nil, nil
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/client.go b/vendor/github.com/containerd/containerd/linux/shim/client.go
new file mode 100644
index 0000000..eae946b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/client.go
@@ -0,0 +1,282 @@
+// +build !windows
+
+package shim
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"os/exec"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+
+	"github.com/containerd/containerd/events"
+	shim "github.com/containerd/containerd/linux/shim/v1"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/reaper"
+	"github.com/containerd/containerd/sys"
+	"google.golang.org/grpc"
+)
+
+// ClientOpt is an option for a shim client configuration
+type ClientOpt func(context.Context, Config) (shim.ShimClient, io.Closer, error)
+
+// WithStart executes a new shim process. It creates an abstract unix socket
+// for the shim to serve on, starts the shim via the reaper so its exit is
+// observed (invoking exitHandler), optionally places it in the given cgroup,
+// protects it from the OOM killer, and finally connects to it.
+func WithStart(binary, address, daemonAddress, cgroup string, nonewns, debug bool, exitHandler func()) ClientOpt {
+	return func(ctx context.Context, config Config) (_ shim.ShimClient, _ io.Closer, err error) {
+		socket, err := newSocket(address)
+		if err != nil {
+			return nil, nil, err
+		}
+		defer socket.Close()
+		// The listener fd is handed to the shim via ExtraFiles.
+		f, err := socket.File()
+		if err != nil {
+			return nil, nil, errors.Wrapf(err, "failed to get fd for socket %s", address)
+		}
+		defer f.Close()
+
+		cmd := newCommand(binary, daemonAddress, nonewns, debug, config, f)
+		ec, err := reaper.Default.Start(cmd)
+		if err != nil {
+			return nil, nil, errors.Wrapf(err, "failed to start shim")
+		}
+		// Kill the shim if any later step in this function fails.
+		defer func() {
+			if err != nil {
+				cmd.Process.Kill()
+			}
+		}()
+		go func() {
+			reaper.Default.Wait(cmd, ec)
+			exitHandler()
+		}()
+		log.G(ctx).WithFields(logrus.Fields{
+			"pid":     cmd.Process.Pid,
+			"address": address,
+			"debug":   debug,
+		}).Infof("shim %s started", binary)
+		// set shim in cgroup if it is provided
+		if cgroup != "" {
+			if err := setCgroup(cgroup, cmd); err != nil {
+				return nil, nil, err
+			}
+			log.G(ctx).WithFields(logrus.Fields{
+				"pid":     cmd.Process.Pid,
+				"address": address,
+			}).Infof("shim placed in cgroup %s", cgroup)
+		}
+		if err = sys.SetOOMScore(cmd.Process.Pid, sys.OOMScoreMaxKillable); err != nil {
+			return nil, nil, errors.Wrap(err, "failed to set OOM Score on shim")
+		}
+		c, clo, err := WithConnect(address)(ctx, config)
+		if err != nil {
+			return nil, nil, errors.Wrap(err, "failed to connect")
+		}
+		return c, clo, nil
+	}
+}
+
+// newCommand assembles the exec.Cmd used to launch the shim binary with its
+// namespace, workdir, address and optional criu/runtime-root/systemd-cgroup
+// flags. The listener socket is passed as an inherited extra file.
+func newCommand(binary, daemonAddress string, nonewns, debug bool, config Config, socket *os.File) *exec.Cmd {
+	args := []string{
+		"--namespace", config.Namespace,
+		"--workdir", config.WorkDir,
+		"--address", daemonAddress,
+	}
+
+	if config.Criu != "" {
+		args = append(args, "--criu-path", config.Criu)
+	}
+	if config.RuntimeRoot != "" {
+		args = append(args, "--runtime-root", config.RuntimeRoot)
+	}
+	if config.SystemdCgroup {
+		args = append(args, "--systemd-cgroup")
+	}
+	if debug {
+		args = append(args, "--debug")
+	}
+
+	cmd := exec.Command(binary, args...)
+	cmd.Dir = config.Path
+	// make sure the shim can be re-parented to system init
+	// and is cloned in a new mount namespace because the overlay/filesystems
+	// will be mounted by the shim
+	cmd.SysProcAttr = getSysProcAttr(nonewns)
+	cmd.ExtraFiles = append(cmd.ExtraFiles, socket)
+	// In debug mode the shim's stdio is surfaced on the daemon's stdio.
+	if debug {
+		cmd.Stdout = os.Stdout
+		cmd.Stderr = os.Stderr
+	}
+	return cmd
+}
+
+// newSocket listens on an abstract unix socket ("\x00" prefix) at address,
+// rejecting addresses longer than the 106-byte limit enforced here.
+func newSocket(address string) (*net.UnixListener, error) {
+	if len(address) > 106 {
+		return nil, errors.Errorf("%q: unix socket path too long (limit 106)", address)
+	}
+	l, err := net.Listen("unix", "\x00"+address)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to listen to abstract unix socket %q", address)
+	}
+
+	return l.(*net.UnixListener), nil
+}
+
+// connect dials address over gRPC using dialer d, blocking until connected
+// (up to 100s) and failing fast on non-temporary dial errors.
+func connect(address string, d func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) {
+	gopts := []grpc.DialOption{
+		grpc.WithBlock(),
+		grpc.WithInsecure(),
+		grpc.WithTimeout(100 * time.Second),
+		grpc.WithDialer(d),
+		grpc.FailOnNonTempDialError(true),
+	}
+	conn, err := grpc.Dial(dialAddress(address), gopts...)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to dial %q", address)
+	}
+	return conn, nil
+}
+
+// dialer dials a filesystem unix socket, stripping any "unix://" prefix.
+func dialer(address string, timeout time.Duration) (net.Conn, error) {
+	address = strings.TrimPrefix(address, "unix://")
+	return net.DialTimeout("unix", address, timeout)
+}
+
+// annonDialer dials an abstract-namespace unix socket ("\x00" prefix).
+// NOTE(review): "annon" looks like a typo for "anon"; the name is kept as-is
+// to match the vendored upstream source.
+func annonDialer(address string, timeout time.Duration) (net.Conn, error) {
+	address = strings.TrimPrefix(address, "unix://")
+	return net.DialTimeout("unix", "\x00"+address, timeout)
+}
+
+// dialAddress renders address in the "unix://" form expected by grpc.Dial.
+func dialAddress(address string) string {
+	return fmt.Sprintf("unix://%s", address)
+}
+
+// WithConnect connects to an existing shim over its abstract unix socket.
+// The returned io.Closer is the gRPC connection itself.
+func WithConnect(address string) ClientOpt {
+	return func(ctx context.Context, config Config) (shim.ShimClient, io.Closer, error) {
+		conn, err := connect(address, annonDialer)
+		if err != nil {
+			return nil, nil, err
+		}
+		return shim.NewShimClient(conn), conn, nil
+	}
+}
+
+// WithLocal uses an in process shim; no connection is made, so the returned
+// io.Closer is nil.
+func WithLocal(publisher events.Publisher) func(context.Context, Config) (shim.ShimClient, io.Closer, error) {
+	return func(ctx context.Context, config Config) (shim.ShimClient, io.Closer, error) {
+		service, err := NewService(config, publisher)
+		if err != nil {
+			return nil, nil, err
+		}
+		return NewLocal(service), nil, nil
+	}
+}
+
+// Config contains shim specific configuration
+type Config struct {
+	Path          string
+	Namespace     string
+	WorkDir       string
+	Criu          string
+	RuntimeRoot   string
+	SystemdCgroup bool
+}
+
+// New returns a new shim client built by applying opt (e.g. WithStart,
+// WithConnect or WithLocal) to the given configuration.
+func New(ctx context.Context, config Config, opt ClientOpt) (*Client, error) {
+	s, c, err := opt(ctx, config)
+	if err != nil {
+		return nil, err
+	}
+	return &Client{
+		ShimClient: s,
+		c:          c,
+		exitCh:     make(chan struct{}),
+	}, nil
+}
+
+// Client is a shim client containing the connection to a shim
+type Client struct {
+	shim.ShimClient
+
+	c        io.Closer
+	exitCh   chan struct{}
+	exitOnce sync.Once
+}
+
+// IsAlive returns true if the shim can be contacted.
+// NOTE: a negative answer doesn't mean that the process is gone.
+func (c *Client) IsAlive(ctx context.Context) (bool, error) {
+	_, err := c.ShimInfo(ctx, empty)
+	if err != nil {
+		// ErrServerStopped means "not alive" rather than a hard failure.
+		if err != grpc.ErrServerStopped {
+			return false, err
+		}
+		return false, nil
+	}
+	return true, nil
+}
+
+// StopShim signals the shim to exit and wait for the process to disappear
+func (c *Client) StopShim(ctx context.Context) error {
+	return c.signalShim(ctx, unix.SIGTERM)
+}
+
+// KillShim kills the shim forcefully and wait for the process to disappear
+func (c *Client) KillShim(ctx context.Context) error {
+	return c.signalShim(ctx, unix.SIGKILL)
+}
+
+// Close the client connection; a nil closer (in-process shim) is a no-op.
+func (c *Client) Close() error {
+	if c.c == nil {
+		return nil
+	}
+	return c.c.Close()
+}
+
+// signalShim sends sig to the shim process and waits until the process is
+// gone or ctx is cancelled. It is a no-op for an in-process (local) shim and
+// ignores ESRCH (process already gone).
+func (c *Client) signalShim(ctx context.Context, sig syscall.Signal) error {
+	info, err := c.ShimInfo(ctx, empty)
+	if err != nil {
+		return err
+	}
+	pid := int(info.ShimPid)
+	// make sure we don't kill ourselves if we are running a local shim
+	if os.Getpid() == pid {
+		return nil
+	}
+	if err := unix.Kill(pid, sig); err != nil && err != unix.ESRCH {
+		return err
+	}
+	// wait for shim to die after being signaled
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-c.waitForExit(pid):
+		return nil
+	}
+}
+
+// waitForExit returns a channel closed once pid no longer exists. The poll
+// loop is started at most once per Client (exitOnce).
+func (c *Client) waitForExit(pid int) <-chan struct{} {
+	c.exitOnce.Do(func() {
+		for {
+			// use kill(pid, 0) here because the shim could have been reparented
+			// and we are no longer able to waitpid(pid, ...) on the shim
+			if err := unix.Kill(pid, 0); err == unix.ESRCH {
+				close(c.exitCh)
+				return
+			}
+			time.Sleep(10 * time.Millisecond)
+		}
+	})
+	return c.exitCh
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/client_linux.go b/vendor/github.com/containerd/containerd/linux/shim/client_linux.go
new file mode 100644
index 0000000..515e88c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/client_linux.go
@@ -0,0 +1,34 @@
+// +build linux
+
+package shim
+
+import (
+	"os/exec"
+	"syscall"
+
+	"github.com/containerd/cgroups"
+	"github.com/pkg/errors"
+)
+
+// getSysProcAttr returns the attributes for launching the shim: its own
+// process group, and (unless nonewns is set) a fresh mount namespace so the
+// shim's mounts don't leak into the daemon's namespace.
+func getSysProcAttr(nonewns bool) *syscall.SysProcAttr {
+	attr := syscall.SysProcAttr{
+		Setpgid: true,
+	}
+	if !nonewns {
+		attr.Cloneflags = syscall.CLONE_NEWNS
+	}
+	return &attr
+}
+
+// setCgroup moves cmd's already-started process into the existing v1 cgroup
+// at cgroupPath.
+func setCgroup(cgroupPath string, cmd *exec.Cmd) error {
+	cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(cgroupPath))
+	if err != nil {
+		return errors.Wrapf(err, "failed to load cgroup %s", cgroupPath)
+	}
+	if err := cg.Add(cgroups.Process{
+		Pid: cmd.Process.Pid,
+	}); err != nil {
+		return errors.Wrapf(err, "failed to join cgroup %s", cgroupPath)
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/client_unix.go b/vendor/github.com/containerd/containerd/linux/shim/client_unix.go
new file mode 100644
index 0000000..d478f3d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/client_unix.go
@@ -0,0 +1,18 @@
+// +build !linux,!windows
+
+package shim
+
+import (
+	"os/exec"
+	"syscall"
+)
+
+// getSysProcAttr (non-Linux unix) only detaches the shim into its own
+// process group; mount namespaces are Linux-specific.
+func getSysProcAttr(nonewns bool) *syscall.SysProcAttr {
+	return &syscall.SysProcAttr{
+		Setpgid: true,
+	}
+}
+
+// setCgroup is a no-op on non-Linux platforms (cgroups are Linux-only).
+func setCgroup(cgroupPath string, cmd *exec.Cmd) error {
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/deleted_state.go b/vendor/github.com/containerd/containerd/linux/shim/deleted_state.go
new file mode 100644
index 0000000..6d22735
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/deleted_state.go
@@ -0,0 +1,50 @@
+// +build !windows
+
+package shim
+
+import (
+	"context"
+
+	"github.com/containerd/console"
+	shimapi "github.com/containerd/containerd/linux/shim/v1"
+	"github.com/pkg/errors"
+)
+
+// deletedState is the terminal process state: every operation fails because
+// the process no longer exists, and SetExited is a no-op.
+type deletedState struct {
+}
+
+func (s *deletedState) Pause(ctx context.Context) error {
+	return errors.Errorf("cannot pause a deleted process")
+}
+
+func (s *deletedState) Resume(ctx context.Context) error {
+	return errors.Errorf("cannot resume a deleted process")
+}
+
+func (s *deletedState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+	return errors.Errorf("cannot update a deleted process")
+}
+
+func (s *deletedState) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) error {
+	return errors.Errorf("cannot checkpoint a deleted process")
+}
+
+func (s *deletedState) Resize(ws console.WinSize) error {
+	return errors.Errorf("cannot resize a deleted process")
+}
+
+func (s *deletedState) Start(ctx context.Context) error {
+	return errors.Errorf("cannot start a deleted process")
+}
+
+func (s *deletedState) Delete(ctx context.Context) error {
+	return errors.Errorf("cannot delete a deleted process")
+}
+
+func (s *deletedState) Kill(ctx context.Context, sig uint32, all bool) error {
+	return errors.Errorf("cannot kill a deleted process")
+}
+
+func (s *deletedState) SetExited(status int) {
+	// no op
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/exec.go b/vendor/github.com/containerd/containerd/linux/shim/exec.go
new file mode 100644
index 0000000..3d27c91
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/exec.go
@@ -0,0 +1,229 @@
+// +build !windows
+
+package shim
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"path/filepath"
+	"sync"
+	"syscall"
+	"time"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/containerd/console"
+	"github.com/containerd/containerd/identifiers"
+	shimapi "github.com/containerd/containerd/linux/shim/v1"
+	"github.com/containerd/fifo"
+	runc "github.com/containerd/go-runc"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+// execProcess represents an additional process exec'd into a running
+// container; its lifecycle is driven by the processState state machine and
+// its container-level operations go through parent (the init process).
+type execProcess struct {
+	wg sync.WaitGroup
+
+	processState
+
+	mu      sync.Mutex
+	id      string
+	console console.Console
+	io      runc.IO
+	status  int
+	exited  time.Time
+	pid     int
+	closers []io.Closer
+	stdin   io.Closer
+	stdio   stdio
+	path    string
+	spec    specs.Process
+
+	parent    *initProcess
+	waitBlock chan struct{}
+}
+
+// newExecProcess validates the exec id, decodes the process spec from the
+// request and returns the process in the "created" state; nothing is started
+// yet.
+func newExecProcess(context context.Context, path string, r *shimapi.ExecProcessRequest, parent *initProcess, id string) (process, error) {
+	if err := identifiers.Validate(id); err != nil {
+		return nil, errors.Wrapf(err, "invalid exec id")
+	}
+	// process exec request
+	var spec specs.Process
+	if err := json.Unmarshal(r.Spec.Value, &spec); err != nil {
+		return nil, err
+	}
+	spec.Terminal = r.Terminal
+
+	e := &execProcess{
+		id:     id,
+		path:   path,
+		parent: parent,
+		spec:   spec,
+		stdio: stdio{
+			stdin:    r.Stdin,
+			stdout:   r.Stdout,
+			stderr:   r.Stderr,
+			terminal: r.Terminal,
+		},
+		waitBlock: make(chan struct{}),
+	}
+	e.processState = &execCreatedState{p: e}
+	return e, nil
+}
+
+// Wait blocks until the process has exited (waitBlock closed by setExited).
+func (e *execProcess) Wait() {
+	<-e.waitBlock
+}
+
+func (e *execProcess) ID() string {
+	return e.id
+}
+
+func (e *execProcess) Pid() int {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	return e.pid
+}
+
+func (e *execProcess) ExitStatus() int {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	return e.status
+}
+
+func (e *execProcess) ExitedAt() time.Time {
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	return e.exited
+}
+
+// setExited records the exit status/time, shuts down the console copy and
+// releases everyone blocked in Wait.
+func (e *execProcess) setExited(status int) {
+	e.status = status
+	e.exited = time.Now()
+	e.parent.platform.shutdownConsole(context.Background(), e.console)
+	close(e.waitBlock)
+}
+
+// delete waits for io copy goroutines to finish, then closes the fifos and
+// the runc io.
+func (e *execProcess) delete(ctx context.Context) error {
+	e.wg.Wait()
+	if e.io != nil {
+		for _, c := range e.closers {
+			c.Close()
+		}
+		e.io.Close()
+	}
+	return nil
+}
+
+func (e *execProcess) resize(ws console.WinSize) error {
+	if e.console == nil {
+		return nil
+	}
+	return e.console.Resize(ws)
+}
+
+// kill signals only this exec process (pid 0 means not started yet — no-op);
+// the "all" flag is ignored for execs.
+func (e *execProcess) kill(ctx context.Context, sig uint32, _ bool) error {
+	pid := e.pid
+	if pid != 0 {
+		if err := unix.Kill(pid, syscall.Signal(sig)); err != nil {
+			return errors.Wrapf(checkKillError(err), "exec kill error")
+		}
+	}
+	return nil
+}
+
+func (e *execProcess) Stdin() io.Closer {
+	return e.stdin
+}
+
+func (e *execProcess) Stdio() stdio {
+	return e.stdio
+}
+
+// start sets up the process io (console socket, null io, or pipes), runs
+// `runc exec --detach`, wires up stdin/console/pipe copying, then reads the
+// pid file to record the exec'd process pid.
+func (e *execProcess) start(ctx context.Context) (err error) {
+	var (
+		socket  *runc.Socket
+		pidfile = filepath.Join(e.path, fmt.Sprintf("%s.pid", e.id))
+	)
+	if e.stdio.terminal {
+		if socket, err = runc.NewTempConsoleSocket(); err != nil {
+			return errors.Wrap(err, "failed to create runc console socket")
+		}
+		defer socket.Close()
+	} else if e.stdio.isNull() {
+		if e.io, err = runc.NewNullIO(); err != nil {
+			return errors.Wrap(err, "creating new NULL IO")
+		}
+	} else {
+		if e.io, err = runc.NewPipeIO(e.parent.IoUID, e.parent.IoGID); err != nil {
+			return errors.Wrap(err, "failed to create runc io pipes")
+		}
+	}
+	opts := &runc.ExecOpts{
+		PidFile: pidfile,
+		IO:      e.io,
+		Detach:  true,
+	}
+	if socket != nil {
+		opts.ConsoleSocket = socket
+	}
+	if err := e.parent.runtime.Exec(ctx, e.parent.id, e.spec, opts); err != nil {
+		return e.parent.runtimeError(err, "OCI runtime exec failed")
+	}
+	// Open stdin non-blocking so a missing reader doesn't hang start.
+	if e.stdio.stdin != "" {
+		sc, err := fifo.OpenFifo(ctx, e.stdio.stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
+		if err != nil {
+			return errors.Wrapf(err, "failed to open stdin fifo %s", e.stdio.stdin)
+		}
+		e.closers = append(e.closers, sc)
+		e.stdin = sc
+	}
+	var copyWaitGroup sync.WaitGroup
+	if socket != nil {
+		console, err := socket.ReceiveMaster()
+		if err != nil {
+			return errors.Wrap(err, "failed to retrieve console master")
+		}
+		if e.console, err = e.parent.platform.copyConsole(ctx, console, e.stdio.stdin, e.stdio.stdout, e.stdio.stderr, &e.wg, &copyWaitGroup); err != nil {
+			return errors.Wrap(err, "failed to start console copy")
+		}
+	} else if !e.stdio.isNull() {
+		if err := copyPipes(ctx, e.io, e.stdio.stdin, e.stdio.stdout, e.stdio.stderr, &e.wg, &copyWaitGroup); err != nil {
+			return errors.Wrap(err, "failed to start io pipe copy")
+		}
+	}
+	// Wait for the copy goroutines to be set up before reading the pid file.
+	copyWaitGroup.Wait()
+	pid, err := runc.ReadPidFile(opts.PidFile)
+	if err != nil {
+		return errors.Wrap(err, "failed to retrieve OCI runtime exec pid")
+	}
+	e.pid = pid
+	return nil
+}
+
+// Status derives the exec process status: paused/pausing is inherited from
+// the container, otherwise it is inferred from the pid (0 = created,
+// signalable = running, else stopped).
+func (e *execProcess) Status(ctx context.Context) (string, error) {
+	s, err := e.parent.Status(ctx)
+	if err != nil {
+		return "", err
+	}
+	// if the container as a whole is in the pausing/paused state, so are all
+	// other processes inside the container, use container state here
+	switch s {
+	case "paused", "pausing":
+		return s, nil
+	}
+	e.mu.Lock()
+	defer e.mu.Unlock()
+	// if we don't have a pid then the exec process has just been created
+	if e.pid == 0 {
+		return "created", nil
+	}
+	// if we have a pid and it can be signaled, the process is running
+	if err := unix.Kill(e.pid, 0); err == nil {
+		return "running", nil
+	}
+	// else if we have a pid but it can no longer be signaled, it has stopped
+	return "stopped", nil
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/exec_state.go b/vendor/github.com/containerd/containerd/linux/shim/exec_state.go
new file mode 100644
index 0000000..4a4aaa2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/exec_state.go
@@ -0,0 +1,172 @@
+// +build !windows
+
+package shim
+
+import (
+	"context"
+
+	"github.com/containerd/console"
+	"github.com/pkg/errors"
+)
+
+// execCreatedState: the exec process exists but has not been started.
+// Valid transitions: running (Start), stopped (SetExited), deleted (Delete).
+type execCreatedState struct {
+	p *execProcess
+}
+
+func (s *execCreatedState) transition(name string) error {
+	switch name {
+	case "running":
+		s.p.processState = &execRunningState{p: s.p}
+	case "stopped":
+		s.p.processState = &execStoppedState{p: s.p}
+	case "deleted":
+		s.p.processState = &deletedState{}
+	default:
+		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
+	}
+	return nil
+}
+
+func (s *execCreatedState) Resize(ws console.WinSize) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.resize(ws)
+}
+
+func (s *execCreatedState) Start(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+	if err := s.p.start(ctx); err != nil {
+		return err
+	}
+	return s.transition("running")
+}
+
+func (s *execCreatedState) Delete(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+	if err := s.p.delete(ctx); err != nil {
+		return err
+	}
+	return s.transition("deleted")
+}
+
+func (s *execCreatedState) Kill(ctx context.Context, sig uint32, all bool) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.kill(ctx, sig, all)
+}
+
+func (s *execCreatedState) SetExited(status int) {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	s.p.setExited(status)
+
+	// A failed transition here is a programming error in the state machine.
+	if err := s.transition("stopped"); err != nil {
+		panic(err)
+	}
+}
+
+// execRunningState: the exec process is running. The only valid transition
+// is to stopped (via SetExited); Start and Delete are rejected.
+type execRunningState struct {
+	p *execProcess
+}
+
+func (s *execRunningState) transition(name string) error {
+	switch name {
+	case "stopped":
+		s.p.processState = &execStoppedState{p: s.p}
+	default:
+		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
+	}
+	return nil
+}
+
+func (s *execRunningState) Resize(ws console.WinSize) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.resize(ws)
+}
+
+func (s *execRunningState) Start(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot start a running process")
+}
+
+func (s *execRunningState) Delete(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot delete a running process")
+}
+
+func (s *execRunningState) Kill(ctx context.Context, sig uint32, all bool) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.kill(ctx, sig, all)
+}
+
+func (s *execRunningState) SetExited(status int) {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	s.p.setExited(status)
+
+	// A failed transition here is a programming error in the state machine.
+	if err := s.transition("stopped"); err != nil {
+		panic(err)
+	}
+}
+
+// execStoppedState: the exec process has exited. Only Delete (transition to
+// deleted) and Kill are permitted; resize/start are rejected.
+type execStoppedState struct {
+	p *execProcess
+}
+
+func (s *execStoppedState) transition(name string) error {
+	switch name {
+	case "deleted":
+		s.p.processState = &deletedState{}
+	default:
+		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
+	}
+	return nil
+}
+
+func (s *execStoppedState) Resize(ws console.WinSize) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot resize a stopped container")
+}
+
+func (s *execStoppedState) Start(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot start a stopped process")
+}
+
+func (s *execStoppedState) Delete(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+	if err := s.p.delete(ctx); err != nil {
+		return err
+	}
+	return s.transition("deleted")
+}
+
+func (s *execStoppedState) Kill(ctx context.Context, sig uint32, all bool) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.kill(ctx, sig, all)
+}
+
+func (s *execStoppedState) SetExited(status int) {
+	// no op
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/init.go b/vendor/github.com/containerd/containerd/linux/shim/init.go
new file mode 100644
index 0000000..88c39a6
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/init.go
@@ -0,0 +1,389 @@
+// +build !windows
+
+package shim
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/containerd/console"
+	"github.com/containerd/containerd/identifiers"
+	"github.com/containerd/containerd/linux/runcopts"
+	shimapi "github.com/containerd/containerd/linux/shim/v1"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/fifo"
+	runc "github.com/containerd/go-runc"
+	"github.com/containerd/typeurl"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+// InitPidFile name of the file that contains the init pid
+const InitPidFile = "init.pid"
+
+// initProcess is the container's init (first) process; it owns the rootfs
+// mounts, the runc runtime handle and the container-wide lifecycle driven by
+// the initState state machine.
+type initProcess struct {
+	wg sync.WaitGroup
+	initState
+
+	// mu is used to ensure that `Start()` and `Exited()` calls return in
+	// the right order when invoked in separate go routines.
+	// This is the case within the shim implementation as it makes use of
+	// the reaper interface.
+	mu sync.Mutex
+
+	waitBlock chan struct{}
+
+	workDir string
+
+	id       string
+	bundle   string
+	console  console.Console
+	platform platform
+	io       runc.IO
+	runtime  *runc.Runc
+	status   int
+	exited   time.Time
+	pid      int
+	closers  []io.Closer
+	stdin    io.Closer
+	stdio    stdio
+	rootfs   string
+	IoUID    int
+	IoGID    int
+}
+
+// newInitProcess validates the task request, mounts the rootfs components,
+// creates the container via `runc create` (or prepares a restore when a
+// checkpoint path is given), wires up io/console copying and records the
+// container pid. On any failure the rootfs mounts are unwound (see the
+// success flag and deferred UnmountAll).
+func (s *Service) newInitProcess(context context.Context, r *shimapi.CreateTaskRequest) (*initProcess, error) {
+	var success bool
+
+	if err := identifiers.Validate(r.ID); err != nil {
+		return nil, errors.Wrapf(err, "invalid task id")
+	}
+	var options runcopts.CreateOptions
+	if r.Options != nil {
+		v, err := typeurl.UnmarshalAny(r.Options)
+		if err != nil {
+			return nil, err
+		}
+		options = *v.(*runcopts.CreateOptions)
+	}
+
+	rootfs := filepath.Join(s.config.Path, "rootfs")
+	// count the number of successful mounts so we can undo
+	// what was actually done rather than what should have been
+	// done.
+	defer func() {
+		if success {
+			return
+		}
+		if err2 := mount.UnmountAll(rootfs, 0); err2 != nil {
+			log.G(context).WithError(err2).Warn("Failed to cleanup rootfs mount")
+		}
+	}()
+	for _, rm := range r.Rootfs {
+		m := &mount.Mount{
+			Type:    rm.Type,
+			Source:  rm.Source,
+			Options: rm.Options,
+		}
+		if err := m.Mount(rootfs); err != nil {
+			return nil, errors.Wrapf(err, "failed to mount rootfs component %v", m)
+		}
+	}
+	runtime := &runc.Runc{
+		Command:       r.Runtime,
+		Log:           filepath.Join(s.config.Path, "log.json"),
+		LogFormat:     runc.JSON,
+		PdeathSignal:  syscall.SIGKILL,
+		Root:          filepath.Join(s.config.RuntimeRoot, s.config.Namespace),
+		Criu:          s.config.Criu,
+		SystemdCgroup: s.config.SystemdCgroup,
+	}
+	p := &initProcess{
+		id:       r.ID,
+		bundle:   r.Bundle,
+		runtime:  runtime,
+		platform: s.platform,
+		stdio: stdio{
+			stdin:    r.Stdin,
+			stdout:   r.Stdout,
+			stderr:   r.Stderr,
+			terminal: r.Terminal,
+		},
+		rootfs:    rootfs,
+		workDir:   s.config.WorkDir,
+		status:    0,
+		waitBlock: make(chan struct{}),
+		IoUID:     int(options.IoUid),
+		IoGID:     int(options.IoGid),
+	}
+	p.initState = &createdState{p: p}
+	var (
+		err    error
+		socket *runc.Socket
+	)
+	// Choose the io mode: console socket for a terminal, null io when no
+	// fifos were requested, otherwise pipes owned by IoUid/IoGid.
+	if r.Terminal {
+		if socket, err = runc.NewTempConsoleSocket(); err != nil {
+			return nil, errors.Wrap(err, "failed to create OCI runtime console socket")
+		}
+		defer socket.Close()
+	} else if hasNoIO(r) {
+		if p.io, err = runc.NewNullIO(); err != nil {
+			return nil, errors.Wrap(err, "creating new NULL IO")
+		}
+	} else {
+		if p.io, err = runc.NewPipeIO(int(options.IoUid), int(options.IoGid)); err != nil {
+			return nil, errors.Wrap(err, "failed to create OCI runtime io pipes")
+		}
+	}
+	pidFile := filepath.Join(s.config.Path, InitPidFile)
+	// Restore path: defer the actual `runc restore` to the created-checkpoint
+	// state's Start; nothing is created here.
+	if r.Checkpoint != "" {
+		opts := &runc.RestoreOpts{
+			CheckpointOpts: runc.CheckpointOpts{
+				ImagePath:  r.Checkpoint,
+				WorkDir:    p.workDir,
+				ParentPath: r.ParentCheckpoint,
+			},
+			PidFile:     pidFile,
+			IO:          p.io,
+			NoPivot:     options.NoPivotRoot,
+			Detach:      true,
+			NoSubreaper: true,
+		}
+		p.initState = &createdCheckpointState{
+			p:    p,
+			opts: opts,
+		}
+		success = true
+		return p, nil
+	}
+	opts := &runc.CreateOpts{
+		PidFile:      pidFile,
+		IO:           p.io,
+		NoPivot:      options.NoPivotRoot,
+		NoNewKeyring: options.NoNewKeyring,
+	}
+	if socket != nil {
+		opts.ConsoleSocket = socket
+	}
+	if err := p.runtime.Create(context, r.ID, r.Bundle, opts); err != nil {
+		return nil, p.runtimeError(err, "OCI runtime create failed")
+	}
+	// Open stdin non-blocking so a missing reader doesn't hang create.
+	if r.Stdin != "" {
+		sc, err := fifo.OpenFifo(context, r.Stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
+		if err != nil {
+			return nil, errors.Wrapf(err, "failed to open stdin fifo %s", r.Stdin)
+		}
+		p.stdin = sc
+		p.closers = append(p.closers, sc)
+	}
+	var copyWaitGroup sync.WaitGroup
+	if socket != nil {
+		console, err := socket.ReceiveMaster()
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to retrieve console master")
+		}
+		console, err = s.platform.copyConsole(context, console, r.Stdin, r.Stdout, r.Stderr, &p.wg, &copyWaitGroup)
+		if err != nil {
+			return nil, errors.Wrap(err, "failed to start console copy")
+		}
+		p.console = console
+	} else if !hasNoIO(r) {
+		if err := copyPipes(context, p.io, r.Stdin, r.Stdout, r.Stderr, &p.wg, &copyWaitGroup); err != nil {
+			return nil, errors.Wrap(err, "failed to start io pipe copy")
+		}
+	}
+
+	// Wait for the copy goroutines to be set up before reading the pid file.
+	copyWaitGroup.Wait()
+	pid, err := runc.ReadPidFile(pidFile)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to retrieve OCI runtime container pid")
+	}
+	p.pid = pid
+	success = true
+	return p, nil
+}
+
+// Wait blocks until the init process has exited (waitBlock closed by
+// setExited).
+func (p *initProcess) Wait() {
+	<-p.waitBlock
+}
+
+func (p *initProcess) ID() string {
+	return p.id
+}
+
+func (p *initProcess) Pid() int {
+	return p.pid
+}
+
+func (p *initProcess) ExitStatus() int {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return p.status
+}
+
+func (p *initProcess) ExitedAt() time.Time {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return p.exited
+}
+
+// Status queries `runc state`; a missing state file is reported as
+// "stopped" rather than an error.
+func (p *initProcess) Status(ctx context.Context) (string, error) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	c, err := p.runtime.State(ctx, p.id)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return "stopped", nil
+		}
+		return "", p.runtimeError(err, "OCI runtime state failed")
+	}
+	return c.Status, nil
+}
+
+func (p *initProcess) start(context context.Context) error {
+	err := p.runtime.Start(context, p.id)
+	return p.runtimeError(err, "OCI runtime start failed")
+}
+
+// setExited records the exit status/time, shuts down the console copy and
+// releases everyone blocked in Wait.
+func (p *initProcess) setExited(status int) {
+	p.exited = time.Now()
+	p.status = status
+	p.platform.shutdownConsole(context.Background(), p.console)
+	close(p.waitBlock)
+}
+
+// delete kills all remaining container processes, runs `runc delete`,
+// closes io and unmounts the rootfs. An unmount failure is surfaced as the
+// returned error only when delete itself succeeded.
+func (p *initProcess) delete(context context.Context) error {
+	p.killAll(context)
+	p.wg.Wait()
+	err := p.runtime.Delete(context, p.id, nil)
+	// ignore errors if a runtime has already deleted the process
+	// but we still hold metadata and pipes
+	//
+	// this is common during a checkpoint, runc will delete the container state
+	// after a checkpoint and the container will no longer exist within runc
+	if err != nil {
+		if strings.Contains(err.Error(), "does not exist") {
+			err = nil
+		} else {
+			err = p.runtimeError(err, "failed to delete task")
+		}
+	}
+	if p.io != nil {
+		for _, c := range p.closers {
+			c.Close()
+		}
+		p.io.Close()
+	}
+	if err2 := mount.UnmountAll(p.rootfs, 0); err2 != nil {
+		log.G(context).WithError(err2).Warn("failed to cleanup rootfs mount")
+		if err == nil {
+			err = errors.Wrap(err2, "failed rootfs umount")
+		}
+	}
+	return err
+}
+
+// resize adjusts the console window; a no-op when no terminal is attached.
+func (p *initProcess) resize(ws console.WinSize) error {
+	if p.console == nil {
+		return nil
+	}
+	return p.console.Resize(ws)
+}
+
+func (p *initProcess) pause(context context.Context) error {
+	err := p.runtime.Pause(context, p.id)
+	return p.runtimeError(err, "OCI runtime pause failed")
+}
+
+func (p *initProcess) resume(context context.Context) error {
+	err := p.runtime.Resume(context, p.id)
+	return p.runtimeError(err, "OCI runtime resume failed")
+}
+
+// kill forwards the signal via `runc kill`, optionally to all processes in
+// the container.
+func (p *initProcess) kill(context context.Context, signal uint32, all bool) error {
+	err := p.runtime.Kill(context, p.id, int(signal), &runc.KillOpts{
+		All: all,
+	})
+	return checkKillError(err)
+}
+
+// killAll SIGKILLs every process in the container.
+func (p *initProcess) killAll(context context.Context) error {
+	err := p.runtime.Kill(context, p.id, int(syscall.SIGKILL), &runc.KillOpts{
+		All: true,
+	})
+	return p.runtimeError(err, "OCI runtime killall failed")
+}
+
+func (p *initProcess) Stdin() io.Closer {
+	return p.stdin
+}
+
+// checkpoint runs `runc checkpoint` with the decoded options, leaving the
+// container running unless options.Exit is set. On failure the criu dump log
+// is copied into the bundle for inspection and its path is included in the
+// returned error.
+func (p *initProcess) checkpoint(context context.Context, r *shimapi.CheckpointTaskRequest) error {
+	var options runcopts.CheckpointOptions
+	if r.Options != nil {
+		v, err := typeurl.UnmarshalAny(r.Options)
+		if err != nil {
+			return err
+		}
+		options = *v.(*runcopts.CheckpointOptions)
+	}
+	var actions []runc.CheckpointAction
+	if !options.Exit {
+		actions = append(actions, runc.LeaveRunning)
+	}
+	// criu scratch space; removed regardless of outcome.
+	work := filepath.Join(p.workDir, "criu-work")
+	defer os.RemoveAll(work)
+	if err := p.runtime.Checkpoint(context, p.id, &runc.CheckpointOpts{
+		WorkDir:                  work,
+		ImagePath:                r.Path,
+		AllowOpenTCP:             options.OpenTcp,
+		AllowExternalUnixSockets: options.ExternalUnixSockets,
+		AllowTerminal:            options.Terminal,
+		FileLocks:                options.FileLocks,
+		EmptyNamespaces:          options.EmptyNamespaces,
+	}, actions...); err != nil {
+		dumpLog := filepath.Join(p.bundle, "criu-dump.log")
+		if cerr := copyFile(dumpLog, filepath.Join(work, "dump.log")); cerr != nil {
+			log.G(context).Error(err)
+		}
+		return fmt.Errorf("%s path= %s", criuError(err), dumpLog)
+	}
+	return nil
+}
+
+// update applies new cgroup resource limits via `runc update`.
+func (p *initProcess) update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+	var resources specs.LinuxResources
+	if err := json.Unmarshal(r.Resources.Value, &resources); err != nil {
+		return err
+	}
+	return p.runtime.Update(context, p.id, &resources)
+}
+
+func (p *initProcess) Stdio() stdio {
+	return p.stdio
+}
+
+// runtimeError enriches rErr with the last error message runc wrote to its
+// log file; nil in, nil out.
+func (p *initProcess) runtimeError(rErr error, msg string) error {
+	if rErr == nil {
+		return nil
+	}
+
+	rMsg, err := getLastRuntimeError(p.runtime)
+	switch {
+	case err != nil:
+		return errors.Wrapf(rErr, "%s: %s (%s)", msg, "unable to retrieve OCI runtime error", err.Error())
+	case rMsg == "":
+		return errors.Wrap(rErr, msg)
+	default:
+		return errors.Errorf("%s: %s", msg, rMsg)
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/init_state.go b/vendor/github.com/containerd/containerd/linux/shim/init_state.go
new file mode 100644
index 0000000..da7e15b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/init_state.go
@@ -0,0 +1,473 @@
+// +build !windows
+
+package shim
+
+import (
+	"context"
+	"sync"
+	"syscall"
+
+	"github.com/containerd/console"
+	"github.com/containerd/containerd/errdefs"
+	shimapi "github.com/containerd/containerd/linux/shim/v1"
+	"github.com/containerd/fifo"
+	runc "github.com/containerd/go-runc"
+	"github.com/pkg/errors"
+)
+
+type initState interface {
+	processState
+
+	Pause(context.Context) error
+	Resume(context.Context) error
+	Update(context.Context, *shimapi.UpdateTaskRequest) error
+	Checkpoint(context.Context, *shimapi.CheckpointTaskRequest) error
+}
+
+type createdState struct {
+	p *initProcess
+}
+
+func (s *createdState) transition(name string) error {
+	switch name {
+	case "running":
+		s.p.initState = &runningState{p: s.p}
+	case "stopped":
+		s.p.initState = &stoppedState{p: s.p}
+	case "deleted":
+		s.p.initState = &deletedState{}
+	default:
+		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
+	}
+	return nil
+}
+
+func (s *createdState) Pause(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot pause task in created state")
+}
+
+func (s *createdState) Resume(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot resume task in created state")
+}
+
+func (s *createdState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.update(context, r)
+}
+
+func (s *createdState) Checkpoint(context context.Context, r *shimapi.CheckpointTaskRequest) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot checkpoint a task in created state")
+}
+
+func (s *createdState) Resize(ws console.WinSize) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.resize(ws)
+}
+
+func (s *createdState) Start(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+	if err := s.p.start(ctx); err != nil {
+		return err
+	}
+	return s.transition("running")
+}
+
+func (s *createdState) Delete(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+	if err := s.p.delete(ctx); err != nil {
+		return err
+	}
+	return s.transition("deleted")
+}
+
+func (s *createdState) Kill(ctx context.Context, sig uint32, all bool) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.kill(ctx, sig, all)
+}
+
+func (s *createdState) SetExited(status int) {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	s.p.setExited(status)
+
+	if err := s.transition("stopped"); err != nil {
+		panic(err)
+	}
+}
+
+type createdCheckpointState struct {
+	p    *initProcess
+	opts *runc.RestoreOpts
+}
+
+func (s *createdCheckpointState) transition(name string) error {
+	switch name {
+	case "running":
+		s.p.initState = &runningState{p: s.p}
+	case "stopped":
+		s.p.initState = &stoppedState{p: s.p}
+	case "deleted":
+		s.p.initState = &deletedState{}
+	default:
+		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
+	}
+	return nil
+}
+
+func (s *createdCheckpointState) Pause(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot pause task in created state")
+}
+
+func (s *createdCheckpointState) Resume(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot resume task in created state")
+}
+
+func (s *createdCheckpointState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.update(context, r)
+}
+
+func (s *createdCheckpointState) Checkpoint(context context.Context, r *shimapi.CheckpointTaskRequest) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot checkpoint a task in created state")
+}
+
+func (s *createdCheckpointState) Resize(ws console.WinSize) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.resize(ws)
+}
+
+func (s *createdCheckpointState) Start(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+	p := s.p
+	if _, err := s.p.runtime.Restore(ctx, p.id, p.bundle, s.opts); err != nil {
+		return p.runtimeError(err, "OCI runtime restore failed")
+	}
+	sio := p.stdio
+	if sio.stdin != "" {
+		sc, err := fifo.OpenFifo(ctx, sio.stdin, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
+		if err != nil {
+			return errors.Wrapf(err, "failed to open stdin fifo %s", sio.stdin)
+		}
+		p.stdin = sc
+		p.closers = append(p.closers, sc)
+	}
+	var copyWaitGroup sync.WaitGroup
+	if !sio.isNull() {
+		if err := copyPipes(ctx, p.io, sio.stdin, sio.stdout, sio.stderr, &p.wg, &copyWaitGroup); err != nil {
+			return errors.Wrap(err, "failed to start io pipe copy")
+		}
+	}
+
+	copyWaitGroup.Wait()
+	pid, err := runc.ReadPidFile(s.opts.PidFile)
+	if err != nil {
+		return errors.Wrap(err, "failed to retrieve OCI runtime container pid")
+	}
+	p.pid = pid
+
+	return s.transition("running")
+}
+
+func (s *createdCheckpointState) Delete(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+	if err := s.p.delete(ctx); err != nil {
+		return err
+	}
+	return s.transition("deleted")
+}
+
+func (s *createdCheckpointState) Kill(ctx context.Context, sig uint32, all bool) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.kill(ctx, sig, all)
+}
+
+func (s *createdCheckpointState) SetExited(status int) {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	s.p.setExited(status)
+
+	if err := s.transition("stopped"); err != nil {
+		panic(err)
+	}
+}
+
+type runningState struct {
+	p *initProcess
+}
+
+func (s *runningState) transition(name string) error {
+	switch name {
+	case "stopped":
+		s.p.initState = &stoppedState{p: s.p}
+	case "paused":
+		s.p.initState = &pausedState{p: s.p}
+	default:
+		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
+	}
+	return nil
+}
+
+func (s *runningState) Pause(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+	if err := s.p.pause(ctx); err != nil {
+		return err
+	}
+	return s.transition("paused")
+}
+
+func (s *runningState) Resume(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot resume a running process")
+}
+
+func (s *runningState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.update(context, r)
+}
+
+func (s *runningState) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.checkpoint(ctx, r)
+}
+
+func (s *runningState) Resize(ws console.WinSize) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.resize(ws)
+}
+
+func (s *runningState) Start(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot start a running process")
+}
+
+func (s *runningState) Delete(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot delete a running process")
+}
+
+func (s *runningState) Kill(ctx context.Context, sig uint32, all bool) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.kill(ctx, sig, all)
+}
+
+func (s *runningState) SetExited(status int) {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	s.p.setExited(status)
+
+	if err := s.transition("stopped"); err != nil {
+		panic(err)
+	}
+}
+
+type pausedState struct {
+	p *initProcess
+}
+
+func (s *pausedState) transition(name string) error {
+	switch name {
+	case "running":
+		s.p.initState = &runningState{p: s.p}
+	case "stopped":
+		s.p.initState = &stoppedState{p: s.p}
+	default:
+		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
+	}
+	return nil
+}
+
+func (s *pausedState) Pause(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot pause a paused container")
+}
+
+func (s *pausedState) Resume(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	if err := s.p.resume(ctx); err != nil {
+		return err
+	}
+	return s.transition("running")
+}
+
+func (s *pausedState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.update(context, r)
+}
+
+func (s *pausedState) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.checkpoint(ctx, r)
+}
+
+func (s *pausedState) Resize(ws console.WinSize) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.resize(ws)
+}
+
+func (s *pausedState) Start(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot start a paused process")
+}
+
+func (s *pausedState) Delete(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot delete a paused process")
+}
+
+func (s *pausedState) Kill(ctx context.Context, sig uint32, all bool) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return s.p.kill(ctx, sig, all)
+}
+
+func (s *pausedState) SetExited(status int) {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	s.p.setExited(status)
+
+	if err := s.transition("stopped"); err != nil {
+		panic(err)
+	}
+
+}
+
+type stoppedState struct {
+	p *initProcess
+}
+
+func (s *stoppedState) transition(name string) error {
+	switch name {
+	case "deleted":
+		s.p.initState = &deletedState{}
+	default:
+		return errors.Errorf("invalid state transition %q to %q", stateName(s), name)
+	}
+	return nil
+}
+
+func (s *stoppedState) Pause(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot pause a stopped container")
+}
+
+func (s *stoppedState) Resume(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot resume a stopped container")
+}
+
+func (s *stoppedState) Update(context context.Context, r *shimapi.UpdateTaskRequest) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot update a stopped container")
+}
+
+func (s *stoppedState) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot checkpoint a stopped container")
+}
+
+func (s *stoppedState) Resize(ws console.WinSize) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot resize a stopped container")
+}
+
+func (s *stoppedState) Start(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+
+	return errors.Errorf("cannot start a stopped process")
+}
+
+func (s *stoppedState) Delete(ctx context.Context) error {
+	s.p.mu.Lock()
+	defer s.p.mu.Unlock()
+	if err := s.p.delete(ctx); err != nil {
+		return err
+	}
+	return s.transition("deleted")
+}
+
+func (s *stoppedState) Kill(ctx context.Context, sig uint32, all bool) error {
+	return errdefs.ToGRPCf(errdefs.ErrNotFound, "process %s not found", s.p.id)
+}
+
+func (s *stoppedState) SetExited(status int) {
+	// no op
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/io.go b/vendor/github.com/containerd/containerd/linux/shim/io.go
new file mode 100644
index 0000000..49ba8e0
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/io.go
@@ -0,0 +1,101 @@
+// +build !windows
+
+package shim
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"sync"
+	"syscall"
+
+	"github.com/containerd/console"
+	"github.com/containerd/fifo"
+	runc "github.com/containerd/go-runc"
+)
+
+func copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error {
+	if stdin != "" {
+		in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0)
+		if err != nil {
+			return err
+		}
+		cwg.Add(1)
+		go func() {
+			cwg.Done()
+			io.Copy(console, in)
+		}()
+	}
+	outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0)
+	if err != nil {
+		return err
+	}
+	outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0)
+	if err != nil {
+		return err
+	}
+	wg.Add(1)
+	cwg.Add(1)
+	go func() {
+		cwg.Done()
+		io.Copy(outw, console)
+		console.Close()
+		outr.Close()
+		outw.Close()
+		wg.Done()
+	}()
+	return nil
+}
+
+func copyPipes(ctx context.Context, rio runc.IO, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) error {
+	for name, dest := range map[string]func(wc io.WriteCloser, rc io.Closer){
+		stdout: func(wc io.WriteCloser, rc io.Closer) {
+			wg.Add(1)
+			cwg.Add(1)
+			go func() {
+				cwg.Done()
+				io.Copy(wc, rio.Stdout())
+				wg.Done()
+				wc.Close()
+				rc.Close()
+			}()
+		},
+		stderr: func(wc io.WriteCloser, rc io.Closer) {
+			wg.Add(1)
+			cwg.Add(1)
+			go func() {
+				cwg.Done()
+				io.Copy(wc, rio.Stderr())
+				wg.Done()
+				wc.Close()
+				rc.Close()
+			}()
+		},
+	} {
+		fw, err := fifo.OpenFifo(ctx, name, syscall.O_WRONLY, 0)
+		if err != nil {
+			return fmt.Errorf("containerd-shim: opening %s failed: %s", name, err)
+		}
+		fr, err := fifo.OpenFifo(ctx, name, syscall.O_RDONLY, 0)
+		if err != nil {
+			return fmt.Errorf("containerd-shim: opening %s failed: %s", name, err)
+		}
+		dest(fw, fr)
+	}
+	if stdin == "" {
+		rio.Stdin().Close()
+		return nil
+	}
+	f, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0)
+	if err != nil {
+		return fmt.Errorf("containerd-shim: opening %s failed: %s", stdin, err)
+	}
+	cwg.Add(1)
+	go func() {
+		cwg.Done()
+		io.Copy(rio.Stdin(), f)
+		rio.Stdin().Close()
+		f.Close()
+	}()
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/local.go b/vendor/github.com/containerd/containerd/linux/shim/local.go
new file mode 100644
index 0000000..5e5634d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/local.go
@@ -0,0 +1,92 @@
+// +build !windows
+
+package shim
+
+import (
+	"path/filepath"
+
+	shimapi "github.com/containerd/containerd/linux/shim/v1"
+	google_protobuf "github.com/golang/protobuf/ptypes/empty"
+	"golang.org/x/net/context"
+	"golang.org/x/sys/unix"
+	"google.golang.org/grpc"
+)
+
+// NewLocal returns a shim client implementation for issue commands to a shim
+func NewLocal(s *Service) shimapi.ShimClient {
+	return &local{
+		s: s,
+	}
+}
+
+type local struct {
+	s *Service
+}
+
+func (c *local) Create(ctx context.Context, in *shimapi.CreateTaskRequest, opts ...grpc.CallOption) (*shimapi.CreateTaskResponse, error) {
+	return c.s.Create(ctx, in)
+}
+
+func (c *local) Start(ctx context.Context, in *shimapi.StartRequest, opts ...grpc.CallOption) (*shimapi.StartResponse, error) {
+	return c.s.Start(ctx, in)
+}
+
+func (c *local) Delete(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*shimapi.DeleteResponse, error) {
+	// make sure we unmount the container's rootfs for this local
+	if err := unix.Unmount(filepath.Join(c.s.config.Path, "rootfs"), 0); err != nil {
+		return nil, err
+	}
+	return c.s.Delete(ctx, in)
+}
+
+func (c *local) DeleteProcess(ctx context.Context, in *shimapi.DeleteProcessRequest, opts ...grpc.CallOption) (*shimapi.DeleteResponse, error) {
+	return c.s.DeleteProcess(ctx, in)
+}
+
+func (c *local) Exec(ctx context.Context, in *shimapi.ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	return c.s.Exec(ctx, in)
+}
+
+func (c *local) ResizePty(ctx context.Context, in *shimapi.ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	return c.s.ResizePty(ctx, in)
+}
+
+func (c *local) State(ctx context.Context, in *shimapi.StateRequest, opts ...grpc.CallOption) (*shimapi.StateResponse, error) {
+	return c.s.State(ctx, in)
+}
+
+func (c *local) Pause(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	return c.s.Pause(ctx, in)
+}
+
+func (c *local) Resume(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	return c.s.Resume(ctx, in)
+}
+
+func (c *local) Kill(ctx context.Context, in *shimapi.KillRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	return c.s.Kill(ctx, in)
+}
+
+func (c *local) ListPids(ctx context.Context, in *shimapi.ListPidsRequest, opts ...grpc.CallOption) (*shimapi.ListPidsResponse, error) {
+	return c.s.ListPids(ctx, in)
+}
+
+func (c *local) CloseIO(ctx context.Context, in *shimapi.CloseIORequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	return c.s.CloseIO(ctx, in)
+}
+
+func (c *local) Checkpoint(ctx context.Context, in *shimapi.CheckpointTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	return c.s.Checkpoint(ctx, in)
+}
+
+func (c *local) ShimInfo(ctx context.Context, in *google_protobuf.Empty, opts ...grpc.CallOption) (*shimapi.ShimInfoResponse, error) {
+	return c.s.ShimInfo(ctx, in)
+}
+
+func (c *local) Update(ctx context.Context, in *shimapi.UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error) {
+	return c.s.Update(ctx, in)
+}
+
+func (c *local) Wait(ctx context.Context, in *shimapi.WaitRequest, opts ...grpc.CallOption) (*shimapi.WaitResponse, error) {
+	return c.s.Wait(ctx, in)
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/process.go b/vendor/github.com/containerd/containerd/linux/shim/process.go
new file mode 100644
index 0000000..f0b4692
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/process.go
@@ -0,0 +1,73 @@
+// +build !windows
+
+package shim
+
+import (
+	"context"
+	"io"
+	"time"
+
+	"github.com/containerd/console"
+	"github.com/pkg/errors"
+)
+
+type stdio struct {
+	stdin    string
+	stdout   string
+	stderr   string
+	terminal bool
+}
+
+func (s stdio) isNull() bool {
+	return s.stdin == "" && s.stdout == "" && s.stderr == ""
+}
+
+type process interface {
+	processState
+
+	// ID returns the id for the process
+	ID() string
+	// Pid returns the pid for the process
+	Pid() int
+	// ExitStatus returns the exit status
+	ExitStatus() int
+	// ExitedAt is the time the process exited
+	ExitedAt() time.Time
+	// Stdin returns the process STDIN
+	Stdin() io.Closer
+	// Stdio returns io information for the container
+	Stdio() stdio
+	// Status returns the process status
+	Status(context.Context) (string, error)
+	// Wait blocks until the process has exited
+	Wait()
+}
+
+type processState interface {
+	// Resize resizes the process console
+	Resize(ws console.WinSize) error
+	// Start execution of the process
+	Start(context.Context) error
+	// Delete deletes the process and its resources
+	Delete(context.Context) error
+	// Kill kills the process
+	Kill(context.Context, uint32, bool) error
+	// SetExited sets the exit status for the process
+	SetExited(status int)
+}
+
+func stateName(v interface{}) string {
+	switch v.(type) {
+	case *runningState, *execRunningState:
+		return "running"
+	case *createdState, *execCreatedState, *createdCheckpointState:
+		return "created"
+	case *pausedState:
+		return "paused"
+	case *deletedState:
+		return "deleted"
+	case *stoppedState:
+		return "stopped"
+	}
+	panic(errors.Errorf("invalid state %v", v))
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/service.go b/vendor/github.com/containerd/containerd/linux/shim/service.go
new file mode 100644
index 0000000..9c9f506
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/service.go
@@ -0,0 +1,513 @@
+// +build !windows
+
+package shim
+
+import (
+	"fmt"
+	"os"
+	"sync"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+
+	"github.com/containerd/console"
+	eventsapi "github.com/containerd/containerd/api/services/events/v1"
+	"github.com/containerd/containerd/api/types/task"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/events"
+	shimapi "github.com/containerd/containerd/linux/shim/v1"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/reaper"
+	"github.com/containerd/containerd/runtime"
+	runc "github.com/containerd/go-runc"
+	google_protobuf "github.com/golang/protobuf/ptypes/empty"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/net/context"
+)
+
+var empty = &google_protobuf.Empty{}
+
+// RuncRoot is the path to the root runc state directory
+const RuncRoot = "/run/containerd/runc"
+
+// NewService returns a new shim service that can be used via GRPC
+func NewService(config Config, publisher events.Publisher) (*Service, error) {
+	if config.Namespace == "" {
+		return nil, fmt.Errorf("shim namespace cannot be empty")
+	}
+	context := namespaces.WithNamespace(context.Background(), config.Namespace)
+	context = log.WithLogger(context, logrus.WithFields(logrus.Fields{
+		"namespace": config.Namespace,
+		"path":      config.Path,
+		"pid":       os.Getpid(),
+	}))
+	s := &Service{
+		config:    config,
+		context:   context,
+		processes: make(map[string]process),
+		events:    make(chan interface{}, 128),
+		ec:        reaper.Default.Subscribe(),
+	}
+	go s.processExits()
+	if err := s.initPlatform(); err != nil {
+		return nil, errors.Wrap(err, "failed to initialized platform behavior")
+	}
+	go s.forward(publisher)
+	return s, nil
+}
+
+// platform handles platform-specific behavior that may differ across
+// platform implementations
+type platform interface {
+	copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error)
+	shutdownConsole(ctx context.Context, console console.Console) error
+	close() error
+}
+
+// Service is the shim implementation of a remote shim over GRPC
+type Service struct {
+	mu sync.Mutex
+
+	config    Config
+	context   context.Context
+	processes map[string]process
+	events    chan interface{}
+	platform  platform
+	ec        chan runc.Exit
+
+	// Filled by Create()
+	id     string
+	bundle string
+}
+
+// Create a new initial process and container with the underlying OCI runtime
+func (s *Service) Create(ctx context.Context, r *shimapi.CreateTaskRequest) (*shimapi.CreateTaskResponse, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	process, err := s.newInitProcess(ctx, r)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+	// save the main task id and bundle to the shim for additional requests
+	s.id = r.ID
+	s.bundle = r.Bundle
+	pid := process.Pid()
+	s.processes[r.ID] = process
+	s.events <- &eventsapi.TaskCreate{
+		ContainerID: r.ID,
+		Bundle:      r.Bundle,
+		Rootfs:      r.Rootfs,
+		IO: &eventsapi.TaskIO{
+			Stdin:    r.Stdin,
+			Stdout:   r.Stdout,
+			Stderr:   r.Stderr,
+			Terminal: r.Terminal,
+		},
+		Checkpoint: r.Checkpoint,
+		Pid:        uint32(pid),
+	}
+	return &shimapi.CreateTaskResponse{
+		Pid: uint32(pid),
+	}, nil
+}
+
+// Start a process
+func (s *Service) Start(ctx context.Context, r *shimapi.StartRequest) (*shimapi.StartResponse, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	p := s.processes[r.ID]
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process %s not found", r.ID)
+	}
+	if err := p.Start(ctx); err != nil {
+		return nil, err
+	}
+	if r.ID == s.id {
+		s.events <- &eventsapi.TaskStart{
+			ContainerID: s.id,
+			Pid:         uint32(p.Pid()),
+		}
+	} else {
+		pid := p.Pid()
+		s.events <- &eventsapi.TaskExecStarted{
+			ContainerID: s.id,
+			ExecID:      r.ID,
+			Pid:         uint32(pid),
+		}
+	}
+	return &shimapi.StartResponse{
+		ID:  p.ID(),
+		Pid: uint32(p.Pid()),
+	}, nil
+}
+
+// Delete the initial process and container
+func (s *Service) Delete(ctx context.Context, r *google_protobuf.Empty) (*shimapi.DeleteResponse, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	p := s.processes[s.id]
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	}
+
+	if err := p.Delete(ctx); err != nil {
+		return nil, err
+	}
+	delete(s.processes, s.id)
+	s.platform.close()
+	s.events <- &eventsapi.TaskDelete{
+		ContainerID: s.id,
+		ExitStatus:  uint32(p.ExitStatus()),
+		ExitedAt:    p.ExitedAt(),
+		Pid:         uint32(p.Pid()),
+	}
+	return &shimapi.DeleteResponse{
+		ExitStatus: uint32(p.ExitStatus()),
+		ExitedAt:   p.ExitedAt(),
+		Pid:        uint32(p.Pid()),
+	}, nil
+}
+
+// DeleteProcess deletes an exec'd process
+func (s *Service) DeleteProcess(ctx context.Context, r *shimapi.DeleteProcessRequest) (*shimapi.DeleteResponse, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if r.ID == s.id {
+		return nil, grpc.Errorf(codes.InvalidArgument, "cannot delete init process with DeleteProcess")
+	}
+	p := s.processes[r.ID]
+	if p == nil {
+		return nil, errors.Wrapf(errdefs.ErrNotFound, "process %s", r.ID)
+	}
+	if err := p.Delete(ctx); err != nil {
+		return nil, err
+	}
+	delete(s.processes, r.ID)
+	return &shimapi.DeleteResponse{
+		ExitStatus: uint32(p.ExitStatus()),
+		ExitedAt:   p.ExitedAt(),
+		Pid:        uint32(p.Pid()),
+	}, nil
+}
+
+// Exec an additional process inside the container
+func (s *Service) Exec(ctx context.Context, r *shimapi.ExecProcessRequest) (*google_protobuf.Empty, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+
+	if p := s.processes[r.ID]; p != nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrAlreadyExists, "id %s", r.ID)
+	}
+
+	p := s.processes[s.id]
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	}
+
+	process, err := newExecProcess(ctx, s.config.Path, r, p.(*initProcess), r.ID)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+	s.processes[r.ID] = process
+
+	s.events <- &eventsapi.TaskExecAdded{
+		ContainerID: s.id,
+		ExecID:      r.ID,
+	}
+	return empty, nil
+}
+
+// ResizePty of a process
+func (s *Service) ResizePty(ctx context.Context, r *shimapi.ResizePtyRequest) (*google_protobuf.Empty, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if r.ID == "" {
+		return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "id not provided")
+	}
+	ws := console.WinSize{
+		Width:  uint16(r.Width),
+		Height: uint16(r.Height),
+	}
+	p := s.processes[r.ID]
+	if p == nil {
+		return nil, errors.Errorf("process does not exist %s", r.ID)
+	}
+	if err := p.Resize(ws); err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+	return empty, nil
+}
+
+// State returns runtime state information for a process
+func (s *Service) State(ctx context.Context, r *shimapi.StateRequest) (*shimapi.StateResponse, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	p := s.processes[r.ID]
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process id %s not found", r.ID)
+	}
+	st, err := p.Status(ctx)
+	if err != nil {
+		return nil, err
+	}
+	status := task.StatusUnknown
+	switch st {
+	case "created":
+		status = task.StatusCreated
+	case "running":
+		status = task.StatusRunning
+	case "stopped":
+		status = task.StatusStopped
+	case "paused":
+		status = task.StatusPaused
+	case "pausing":
+		status = task.StatusPausing
+	}
+	sio := p.Stdio()
+	return &shimapi.StateResponse{
+		ID:         p.ID(),
+		Bundle:     s.bundle,
+		Pid:        uint32(p.Pid()),
+		Status:     status,
+		Stdin:      sio.stdin,
+		Stdout:     sio.stdout,
+		Stderr:     sio.stderr,
+		Terminal:   sio.terminal,
+		ExitStatus: uint32(p.ExitStatus()),
+		ExitedAt:   p.ExitedAt(),
+	}, nil
+}
+
+// Pause the container
+func (s *Service) Pause(ctx context.Context, r *google_protobuf.Empty) (*google_protobuf.Empty, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	p := s.processes[s.id]
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	}
+	if err := p.(*initProcess).Pause(ctx); err != nil {
+		return nil, err
+	}
+	s.events <- &eventsapi.TaskPaused{
+		ContainerID: s.id,
+	}
+	return empty, nil
+}
+
+// Resume the container
+func (s *Service) Resume(ctx context.Context, r *google_protobuf.Empty) (*google_protobuf.Empty, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	p := s.processes[s.id]
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	}
+	if err := p.(*initProcess).Resume(ctx); err != nil {
+		return nil, err
+	}
+	s.events <- &eventsapi.TaskResumed{
+		ContainerID: s.id,
+	}
+	return empty, nil
+}
+
+// Kill a process with the provided signal
+func (s *Service) Kill(ctx context.Context, r *shimapi.KillRequest) (*google_protobuf.Empty, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	if r.ID == "" {
+		p := s.processes[s.id]
+		if p == nil {
+			return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+		}
+		if err := p.Kill(ctx, r.Signal, r.All); err != nil {
+			return nil, errdefs.ToGRPC(err)
+		}
+		return empty, nil
+	}
+
+	p := s.processes[r.ID]
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process id %s not found", r.ID)
+	}
+	if err := p.Kill(ctx, r.Signal, r.All); err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+	return empty, nil
+}
+
+// ListPids returns all pids inside the container
+func (s *Service) ListPids(ctx context.Context, r *shimapi.ListPidsRequest) (*shimapi.ListPidsResponse, error) {
+	pids, err := s.getContainerPids(ctx, r.ID)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+	var processes []*task.ProcessInfo
+	for _, pid := range pids {
+		processes = append(processes, &task.ProcessInfo{
+			Pid: pid,
+		})
+	}
+	return &shimapi.ListPidsResponse{
+		Processes: processes,
+	}, nil
+}
+
+// CloseIO of a process
+func (s *Service) CloseIO(ctx context.Context, r *shimapi.CloseIORequest) (*google_protobuf.Empty, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	p := s.processes[r.ID]
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrNotFound, "process does not exist %s", r.ID)
+	}
+	if stdin := p.Stdin(); stdin != nil {
+		if err := stdin.Close(); err != nil {
+			return nil, errors.Wrap(err, "close stdin")
+		}
+	}
+	return empty, nil
+}
+
+// Checkpoint the container
+func (s *Service) Checkpoint(ctx context.Context, r *shimapi.CheckpointTaskRequest) (*google_protobuf.Empty, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	p := s.processes[s.id]
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	}
+	if err := p.(*initProcess).Checkpoint(ctx, r); err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+	s.events <- &eventsapi.TaskCheckpointed{
+		ContainerID: s.id,
+	}
+	return empty, nil
+}
+
+// ShimInfo returns shim information such as the shim's pid
+func (s *Service) ShimInfo(ctx context.Context, r *google_protobuf.Empty) (*shimapi.ShimInfoResponse, error) {
+	return &shimapi.ShimInfoResponse{
+		ShimPid: uint32(os.Getpid()),
+	}, nil
+}
+
+// Update a running container
+func (s *Service) Update(ctx context.Context, r *shimapi.UpdateTaskRequest) (*google_protobuf.Empty, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	p := s.processes[s.id]
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	}
+	if err := p.(*initProcess).Update(ctx, r); err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+	return empty, nil
+}
+
+// Wait for a process to exit
+func (s *Service) Wait(ctx context.Context, r *shimapi.WaitRequest) (*shimapi.WaitResponse, error) {
+	s.mu.Lock()
+	p := s.processes[r.ID]
+	s.mu.Unlock()
+	if p == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrFailedPrecondition, "container must be created")
+	}
+	p.Wait()
+
+	return &shimapi.WaitResponse{
+		ExitStatus: uint32(p.ExitStatus()),
+		ExitedAt:   p.ExitedAt(),
+	}, nil
+}
+
+func (s *Service) processExits() {
+	for e := range s.ec {
+		s.checkProcesses(e)
+	}
+}
+
+func (s *Service) checkProcesses(e runc.Exit) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	for _, p := range s.processes {
+		if p.Pid() == e.Pid {
+			if ip, ok := p.(*initProcess); ok {
+				// Ensure all children are killed
+				if err := ip.killAll(s.context); err != nil {
+					log.G(s.context).WithError(err).WithField("id", ip.ID()).
+						Error("failed to kill init's children")
+				}
+			}
+			p.SetExited(e.Status)
+			s.events <- &eventsapi.TaskExit{
+				ContainerID: s.id,
+				ID:          p.ID(),
+				Pid:         uint32(e.Pid),
+				ExitStatus:  uint32(e.Status),
+				ExitedAt:    p.ExitedAt(),
+			}
+			return
+		}
+	}
+}
+
+func (s *Service) getContainerPids(ctx context.Context, id string) ([]uint32, error) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	p := s.processes[s.id]
+	if p == nil {
+		return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "container must be created")
+	}
+
+	ps, err := p.(*initProcess).runtime.Ps(ctx, id)
+	if err != nil {
+		return nil, err
+	}
+	pids := make([]uint32, 0, len(ps))
+	for _, pid := range ps {
+		pids = append(pids, uint32(pid))
+	}
+	return pids, nil
+}
+
+func (s *Service) forward(publisher events.Publisher) {
+	for e := range s.events {
+		if err := publisher.Publish(s.context, getTopic(s.context, e), e); err != nil {
+			logrus.WithError(err).Error("post event")
+		}
+	}
+}
+
+func getTopic(ctx context.Context, e interface{}) string {
+	switch e.(type) {
+	case *eventsapi.TaskCreate:
+		return runtime.TaskCreateEventTopic
+	case *eventsapi.TaskStart:
+		return runtime.TaskStartEventTopic
+	case *eventsapi.TaskOOM:
+		return runtime.TaskOOMEventTopic
+	case *eventsapi.TaskExit:
+		return runtime.TaskExitEventTopic
+	case *eventsapi.TaskDelete:
+		return runtime.TaskDeleteEventTopic
+	case *eventsapi.TaskExecAdded:
+		return runtime.TaskExecAddedEventTopic
+	case *eventsapi.TaskExecStarted:
+		return runtime.TaskExecStartedEventTopic
+	case *eventsapi.TaskPaused:
+		return runtime.TaskPausedEventTopic
+	case *eventsapi.TaskResumed:
+		return runtime.TaskResumedEventTopic
+	case *eventsapi.TaskCheckpointed:
+		return runtime.TaskCheckpointedEventTopic
+	default:
+		logrus.Warnf("no topic for type %#v", e)
+	}
+	return runtime.TaskUnknownTopic
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/service_linux.go b/vendor/github.com/containerd/containerd/linux/shim/service_linux.go
new file mode 100644
index 0000000..1d078ba
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/service_linux.go
@@ -0,0 +1,91 @@
+package shim
+
+import (
+	"io"
+	"sync"
+	"syscall"
+
+	"github.com/containerd/console"
+	"github.com/containerd/fifo"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+type linuxPlatform struct {
+	epoller *console.Epoller
+}
+
+func (p *linuxPlatform) copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) {
+	if p.epoller == nil {
+		return nil, errors.New("uninitialized epoller")
+	}
+
+	epollConsole, err := p.epoller.Add(console)
+	if err != nil {
+		return nil, err
+	}
+
+	if stdin != "" {
+		in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0)
+		if err != nil {
+			return nil, err
+		}
+		cwg.Add(1)
+		go func() {
+			cwg.Done()
+			io.Copy(epollConsole, in)
+		}()
+	}
+
+	outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0)
+	if err != nil {
+		return nil, err
+	}
+	outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0)
+	if err != nil {
+		return nil, err
+	}
+	wg.Add(1)
+	cwg.Add(1)
+	go func() {
+		cwg.Done()
+		io.Copy(outw, epollConsole)
+		epollConsole.Close()
+		outr.Close()
+		outw.Close()
+		wg.Done()
+	}()
+	return epollConsole, nil
+}
+
+func (p *linuxPlatform) shutdownConsole(ctx context.Context, cons console.Console) error {
+	if p.epoller == nil {
+		return errors.New("uninitialized epoller")
+	}
+	epollConsole, ok := cons.(*console.EpollConsole)
+	if !ok {
+		return errors.Errorf("expected EpollConsole, got %#v", cons)
+	}
+	return epollConsole.Shutdown(p.epoller.CloseConsole)
+}
+
+func (p *linuxPlatform) close() error {
+	return p.epoller.Close()
+}
+
+// initialize a single epoll fd to manage our consoles. `initPlatform` should
+// only be called once.
+func (s *Service) initPlatform() error {
+	if s.platform != nil {
+		return nil
+	}
+	epoller, err := console.NewEpoller()
+	if err != nil {
+		return errors.Wrap(err, "failed to initialize epoller")
+	}
+	s.platform = &linuxPlatform{
+		epoller: epoller,
+	}
+	go epoller.Wait()
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/service_unix.go b/vendor/github.com/containerd/containerd/linux/shim/service_unix.go
new file mode 100644
index 0000000..c00b853
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/service_unix.go
@@ -0,0 +1,62 @@
+// +build !windows,!linux
+
+package shim
+
+import (
+	"io"
+	"sync"
+	"syscall"
+
+	"github.com/containerd/console"
+	"github.com/containerd/fifo"
+	"golang.org/x/net/context"
+)
+
+type unixPlatform struct {
+}
+
+func (p *unixPlatform) copyConsole(ctx context.Context, console console.Console, stdin, stdout, stderr string, wg, cwg *sync.WaitGroup) (console.Console, error) {
+	if stdin != "" {
+		in, err := fifo.OpenFifo(ctx, stdin, syscall.O_RDONLY, 0)
+		if err != nil {
+			return nil, err
+		}
+		cwg.Add(1)
+		go func() {
+			cwg.Done()
+			io.Copy(console, in)
+		}()
+	}
+	outw, err := fifo.OpenFifo(ctx, stdout, syscall.O_WRONLY, 0)
+	if err != nil {
+		return nil, err
+	}
+	outr, err := fifo.OpenFifo(ctx, stdout, syscall.O_RDONLY, 0)
+	if err != nil {
+		return nil, err
+	}
+	wg.Add(1)
+	cwg.Add(1)
+	go func() {
+		cwg.Done()
+		io.Copy(outw, console)
+		console.Close()
+		outr.Close()
+		outw.Close()
+		wg.Done()
+	}()
+	return console, nil
+}
+
+func (p *unixPlatform) shutdownConsole(ctx context.Context, cons console.Console) error {
+	return nil
+}
+
+func (p *unixPlatform) close() error {
+	return nil
+}
+
+func (s *Service) initPlatform() error {
+	s.platform = &unixPlatform{}
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/utils.go b/vendor/github.com/containerd/containerd/linux/shim/utils.go
new file mode 100644
index 0000000..317f8da
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/utils.go
@@ -0,0 +1,86 @@
+// +build !windows
+
+package shim
+
+import (
+	"encoding/json"
+	"io"
+	"os"
+	"strings"
+	"time"
+
+	"github.com/containerd/containerd/errdefs"
+	shimapi "github.com/containerd/containerd/linux/shim/v1"
+	runc "github.com/containerd/go-runc"
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+// TODO(mlaventure): move to runc package?
+func getLastRuntimeError(r *runc.Runc) (string, error) {
+	if r.Log == "" {
+		return "", nil
+	}
+
+	f, err := os.OpenFile(r.Log, os.O_RDONLY, 0400)
+	if err != nil {
+		return "", err
+	}
+
+	var (
+		errMsg string
+		log    struct {
+			Level string
+			Msg   string
+			Time  time.Time
+		}
+	)
+
+	dec := json.NewDecoder(f)
+	for err = nil; err == nil; {
+		if err = dec.Decode(&log); err != nil && err != io.EOF {
+			return "", err
+		}
+		if log.Level == "error" {
+			errMsg = strings.TrimSpace(log.Msg)
+		}
+	}
+
+	return errMsg, nil
+}
+
+// criuError returns only the first line of the error message from criu
+// it tries to add an invalid dump log location when returning the message
+func criuError(err error) string {
+	parts := strings.Split(err.Error(), "\n")
+	return parts[0]
+}
+
+func copyFile(to, from string) error {
+	ff, err := os.Open(from)
+	if err != nil {
+		return err
+	}
+	defer ff.Close()
+	tt, err := os.Create(to)
+	if err != nil {
+		return err
+	}
+	defer tt.Close()
+	_, err = io.Copy(tt, ff)
+	return err
+}
+
+func checkKillError(err error) error {
+	if err == nil {
+		return nil
+	}
+	if strings.Contains(err.Error(), "os: process already finished") || err == unix.ESRCH {
+		return errors.Wrapf(errdefs.ErrNotFound, "process already finished")
+	}
+	return errors.Wrapf(err, "unknown error after kill")
+}
+
+func hasNoIO(r *shimapi.CreateTaskRequest) bool {
+	return r.Stdin == "" && r.Stdout == "" && r.Stderr == ""
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go
new file mode 100644
index 0000000..831d091
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.pb.go
@@ -0,0 +1,4749 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/linux/shim/v1/shim.proto
+// DO NOT EDIT!
+
+/*
+	Package shim is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/linux/shim/v1/shim.proto
+
+	It has these top-level messages:
+		CreateTaskRequest
+		CreateTaskResponse
+		DeleteResponse
+		DeleteProcessRequest
+		ExecProcessRequest
+		ExecProcessResponse
+		ResizePtyRequest
+		StateRequest
+		StateResponse
+		KillRequest
+		CloseIORequest
+		ListPidsRequest
+		ListPidsResponse
+		CheckpointTaskRequest
+		ShimInfoResponse
+		UpdateTaskRequest
+		StartRequest
+		StartResponse
+		WaitRequest
+		WaitResponse
+*/
+package shim
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/types"
+import google_protobuf1 "github.com/golang/protobuf/ptypes/empty"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/gogo/protobuf/types"
+import containerd_types "github.com/containerd/containerd/api/types"
+import containerd_v1_types "github.com/containerd/containerd/api/types/task"
+
+import time "time"
+
+import (
+	context "golang.org/x/net/context"
+	grpc "google.golang.org/grpc"
+)
+
+import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type CreateTaskRequest struct {
+	ID               string                    `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Bundle           string                    `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"`
+	Runtime          string                    `protobuf:"bytes,3,opt,name=runtime,proto3" json:"runtime,omitempty"`
+	Rootfs           []*containerd_types.Mount `protobuf:"bytes,4,rep,name=rootfs" json:"rootfs,omitempty"`
+	Terminal         bool                      `protobuf:"varint,5,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	Stdin            string                    `protobuf:"bytes,6,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout           string                    `protobuf:"bytes,7,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr           string                    `protobuf:"bytes,8,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Checkpoint       string                    `protobuf:"bytes,9,opt,name=checkpoint,proto3" json:"checkpoint,omitempty"`
+	ParentCheckpoint string                    `protobuf:"bytes,10,opt,name=parent_checkpoint,json=parentCheckpoint,proto3" json:"parent_checkpoint,omitempty"`
+	Options          *google_protobuf.Any      `protobuf:"bytes,11,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *CreateTaskRequest) Reset()                    { *m = CreateTaskRequest{} }
+func (*CreateTaskRequest) ProtoMessage()               {}
+func (*CreateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{0} }
+
+type CreateTaskResponse struct {
+	Pid uint32 `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
+}
+
+func (m *CreateTaskResponse) Reset()                    { *m = CreateTaskResponse{} }
+func (*CreateTaskResponse) ProtoMessage()               {}
+func (*CreateTaskResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{1} }
+
+type DeleteResponse struct {
+	Pid        uint32    `protobuf:"varint,1,opt,name=pid,proto3" json:"pid,omitempty"`
+	ExitStatus uint32    `protobuf:"varint,2,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt   time.Time `protobuf:"bytes,3,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+}
+
+func (m *DeleteResponse) Reset()                    { *m = DeleteResponse{} }
+func (*DeleteResponse) ProtoMessage()               {}
+func (*DeleteResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{2} }
+
+type DeleteProcessRequest struct {
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (m *DeleteProcessRequest) Reset()                    { *m = DeleteProcessRequest{} }
+func (*DeleteProcessRequest) ProtoMessage()               {}
+func (*DeleteProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{3} }
+
+type ExecProcessRequest struct {
+	ID       string               `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Terminal bool                 `protobuf:"varint,2,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	Stdin    string               `protobuf:"bytes,3,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout   string               `protobuf:"bytes,4,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr   string               `protobuf:"bytes,5,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Spec     *google_protobuf.Any `protobuf:"bytes,6,opt,name=spec" json:"spec,omitempty"`
+}
+
+func (m *ExecProcessRequest) Reset()                    { *m = ExecProcessRequest{} }
+func (*ExecProcessRequest) ProtoMessage()               {}
+func (*ExecProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{4} }
+
+type ExecProcessResponse struct {
+}
+
+func (m *ExecProcessResponse) Reset()                    { *m = ExecProcessResponse{} }
+func (*ExecProcessResponse) ProtoMessage()               {}
+func (*ExecProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{5} }
+
+type ResizePtyRequest struct {
+	ID     string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Width  uint32 `protobuf:"varint,2,opt,name=width,proto3" json:"width,omitempty"`
+	Height uint32 `protobuf:"varint,3,opt,name=height,proto3" json:"height,omitempty"`
+}
+
+func (m *ResizePtyRequest) Reset()                    { *m = ResizePtyRequest{} }
+func (*ResizePtyRequest) ProtoMessage()               {}
+func (*ResizePtyRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{6} }
+
+type StateRequest struct {
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (m *StateRequest) Reset()                    { *m = StateRequest{} }
+func (*StateRequest) ProtoMessage()               {}
+func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{7} }
+
+type StateResponse struct {
+	ID         string                     `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Bundle     string                     `protobuf:"bytes,2,opt,name=bundle,proto3" json:"bundle,omitempty"`
+	Pid        uint32                     `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
+	Status     containerd_v1_types.Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
+	Stdin      string                     `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
+	Stdout     string                     `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
+	Stderr     string                     `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
+	Terminal   bool                       `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
+	ExitStatus uint32                     `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt   time.Time                  `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+}
+
+func (m *StateResponse) Reset()                    { *m = StateResponse{} }
+func (*StateResponse) ProtoMessage()               {}
+func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{8} }
+
+type KillRequest struct {
+	ID     string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Signal uint32 `protobuf:"varint,2,opt,name=signal,proto3" json:"signal,omitempty"`
+	All    bool   `protobuf:"varint,3,opt,name=all,proto3" json:"all,omitempty"`
+}
+
+func (m *KillRequest) Reset()                    { *m = KillRequest{} }
+func (*KillRequest) ProtoMessage()               {}
+func (*KillRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{9} }
+
+type CloseIORequest struct {
+	ID    string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Stdin bool   `protobuf:"varint,2,opt,name=stdin,proto3" json:"stdin,omitempty"`
+}
+
+func (m *CloseIORequest) Reset()                    { *m = CloseIORequest{} }
+func (*CloseIORequest) ProtoMessage()               {}
+func (*CloseIORequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{10} }
+
+type ListPidsRequest struct {
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (m *ListPidsRequest) Reset()                    { *m = ListPidsRequest{} }
+func (*ListPidsRequest) ProtoMessage()               {}
+func (*ListPidsRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{11} }
+
+type ListPidsResponse struct {
+	Processes []*containerd_v1_types.ProcessInfo `protobuf:"bytes,1,rep,name=processes" json:"processes,omitempty"`
+}
+
+func (m *ListPidsResponse) Reset()                    { *m = ListPidsResponse{} }
+func (*ListPidsResponse) ProtoMessage()               {}
+func (*ListPidsResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{12} }
+
+type CheckpointTaskRequest struct {
+	Path    string               `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
+	Options *google_protobuf.Any `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *CheckpointTaskRequest) Reset()                    { *m = CheckpointTaskRequest{} }
+func (*CheckpointTaskRequest) ProtoMessage()               {}
+func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{13} }
+
+type ShimInfoResponse struct {
+	ShimPid uint32 `protobuf:"varint,1,opt,name=shim_pid,json=shimPid,proto3" json:"shim_pid,omitempty"`
+}
+
+func (m *ShimInfoResponse) Reset()                    { *m = ShimInfoResponse{} }
+func (*ShimInfoResponse) ProtoMessage()               {}
+func (*ShimInfoResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{14} }
+
+type UpdateTaskRequest struct {
+	Resources *google_protobuf.Any `protobuf:"bytes,1,opt,name=resources" json:"resources,omitempty"`
+}
+
+func (m *UpdateTaskRequest) Reset()                    { *m = UpdateTaskRequest{} }
+func (*UpdateTaskRequest) ProtoMessage()               {}
+func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{15} }
+
+type StartRequest struct {
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (m *StartRequest) Reset()                    { *m = StartRequest{} }
+func (*StartRequest) ProtoMessage()               {}
+func (*StartRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{16} }
+
+type StartResponse struct {
+	ID  string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+}
+
+func (m *StartResponse) Reset()                    { *m = StartResponse{} }
+func (*StartResponse) ProtoMessage()               {}
+func (*StartResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{17} }
+
+type WaitRequest struct {
+	ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+}
+
+func (m *WaitRequest) Reset()                    { *m = WaitRequest{} }
+func (*WaitRequest) ProtoMessage()               {}
+func (*WaitRequest) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{18} }
+
+type WaitResponse struct {
+	ExitStatus uint32    `protobuf:"varint,1,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
+	ExitedAt   time.Time `protobuf:"bytes,2,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
+}
+
+func (m *WaitResponse) Reset()                    { *m = WaitResponse{} }
+func (*WaitResponse) ProtoMessage()               {}
+func (*WaitResponse) Descriptor() ([]byte, []int) { return fileDescriptorShim, []int{19} }
+
+func init() {
+	proto.RegisterType((*CreateTaskRequest)(nil), "containerd.runtime.linux.shim.v1.CreateTaskRequest")
+	proto.RegisterType((*CreateTaskResponse)(nil), "containerd.runtime.linux.shim.v1.CreateTaskResponse")
+	proto.RegisterType((*DeleteResponse)(nil), "containerd.runtime.linux.shim.v1.DeleteResponse")
+	proto.RegisterType((*DeleteProcessRequest)(nil), "containerd.runtime.linux.shim.v1.DeleteProcessRequest")
+	proto.RegisterType((*ExecProcessRequest)(nil), "containerd.runtime.linux.shim.v1.ExecProcessRequest")
+	proto.RegisterType((*ExecProcessResponse)(nil), "containerd.runtime.linux.shim.v1.ExecProcessResponse")
+	proto.RegisterType((*ResizePtyRequest)(nil), "containerd.runtime.linux.shim.v1.ResizePtyRequest")
+	proto.RegisterType((*StateRequest)(nil), "containerd.runtime.linux.shim.v1.StateRequest")
+	proto.RegisterType((*StateResponse)(nil), "containerd.runtime.linux.shim.v1.StateResponse")
+	proto.RegisterType((*KillRequest)(nil), "containerd.runtime.linux.shim.v1.KillRequest")
+	proto.RegisterType((*CloseIORequest)(nil), "containerd.runtime.linux.shim.v1.CloseIORequest")
+	proto.RegisterType((*ListPidsRequest)(nil), "containerd.runtime.linux.shim.v1.ListPidsRequest")
+	proto.RegisterType((*ListPidsResponse)(nil), "containerd.runtime.linux.shim.v1.ListPidsResponse")
+	proto.RegisterType((*CheckpointTaskRequest)(nil), "containerd.runtime.linux.shim.v1.CheckpointTaskRequest")
+	proto.RegisterType((*ShimInfoResponse)(nil), "containerd.runtime.linux.shim.v1.ShimInfoResponse")
+	proto.RegisterType((*UpdateTaskRequest)(nil), "containerd.runtime.linux.shim.v1.UpdateTaskRequest")
+	proto.RegisterType((*StartRequest)(nil), "containerd.runtime.linux.shim.v1.StartRequest")
+	proto.RegisterType((*StartResponse)(nil), "containerd.runtime.linux.shim.v1.StartResponse")
+	proto.RegisterType((*WaitRequest)(nil), "containerd.runtime.linux.shim.v1.WaitRequest")
+	proto.RegisterType((*WaitResponse)(nil), "containerd.runtime.linux.shim.v1.WaitResponse")
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// Client API for Shim service
+
+type ShimClient interface {
+	// State returns shim and task state information.
+	State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error)
+	Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error)
+	Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error)
+	Delete(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*DeleteResponse, error)
+	DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error)
+	ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error)
+	Pause(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	Resume(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	// ShimInfo returns information about the shim.
+	ShimInfo(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*ShimInfoResponse, error)
+	Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error)
+	Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error)
+}
+
+type shimClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewShimClient(cc *grpc.ClientConn) ShimClient {
+	return &shimClient{cc}
+}
+
+func (c *shimClient) State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) {
+	out := new(StateResponse)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/State", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) Create(ctx context.Context, in *CreateTaskRequest, opts ...grpc.CallOption) (*CreateTaskResponse, error) {
+	out := new(CreateTaskResponse)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Create", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) Start(ctx context.Context, in *StartRequest, opts ...grpc.CallOption) (*StartResponse, error) {
+	out := new(StartResponse)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Start", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) Delete(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*DeleteResponse, error) {
+	out := new(DeleteResponse)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Delete", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) DeleteProcess(ctx context.Context, in *DeleteProcessRequest, opts ...grpc.CallOption) (*DeleteResponse, error) {
+	out := new(DeleteResponse)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/DeleteProcess", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error) {
+	out := new(ListPidsResponse)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/ListPids", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) Pause(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
+	out := new(google_protobuf1.Empty)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Pause", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) Resume(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
+	out := new(google_protobuf1.Empty)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Resume", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
+	out := new(google_protobuf1.Empty)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Checkpoint", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) Kill(ctx context.Context, in *KillRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
+	out := new(google_protobuf1.Empty)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Kill", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) Exec(ctx context.Context, in *ExecProcessRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
+	out := new(google_protobuf1.Empty)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Exec", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) ResizePty(ctx context.Context, in *ResizePtyRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
+	out := new(google_protobuf1.Empty)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/ResizePty", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) CloseIO(ctx context.Context, in *CloseIORequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
+	out := new(google_protobuf1.Empty)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/CloseIO", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) ShimInfo(ctx context.Context, in *google_protobuf1.Empty, opts ...grpc.CallOption) (*ShimInfoResponse, error) {
+	out := new(ShimInfoResponse)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/ShimInfo", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf1.Empty, error) {
+	out := new(google_protobuf1.Empty)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Update", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *shimClient) Wait(ctx context.Context, in *WaitRequest, opts ...grpc.CallOption) (*WaitResponse, error) {
+	out := new(WaitResponse)
+	err := grpc.Invoke(ctx, "/containerd.runtime.linux.shim.v1.Shim/Wait", in, out, c.cc, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Server API for Shim service
+
+type ShimServer interface {
+	// State returns shim and task state information.
+	State(context.Context, *StateRequest) (*StateResponse, error)
+	Create(context.Context, *CreateTaskRequest) (*CreateTaskResponse, error)
+	Start(context.Context, *StartRequest) (*StartResponse, error)
+	Delete(context.Context, *google_protobuf1.Empty) (*DeleteResponse, error)
+	DeleteProcess(context.Context, *DeleteProcessRequest) (*DeleteResponse, error)
+	ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error)
+	Pause(context.Context, *google_protobuf1.Empty) (*google_protobuf1.Empty, error)
+	Resume(context.Context, *google_protobuf1.Empty) (*google_protobuf1.Empty, error)
+	Checkpoint(context.Context, *CheckpointTaskRequest) (*google_protobuf1.Empty, error)
+	Kill(context.Context, *KillRequest) (*google_protobuf1.Empty, error)
+	Exec(context.Context, *ExecProcessRequest) (*google_protobuf1.Empty, error)
+	ResizePty(context.Context, *ResizePtyRequest) (*google_protobuf1.Empty, error)
+	CloseIO(context.Context, *CloseIORequest) (*google_protobuf1.Empty, error)
+	// ShimInfo returns information about the shim.
+	ShimInfo(context.Context, *google_protobuf1.Empty) (*ShimInfoResponse, error)
+	Update(context.Context, *UpdateTaskRequest) (*google_protobuf1.Empty, error)
+	Wait(context.Context, *WaitRequest) (*WaitResponse, error)
+}
+
+func RegisterShimServer(s *grpc.Server, srv ShimServer) {
+	s.RegisterService(&_Shim_serviceDesc, srv)
+}
+
+func _Shim_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(StateRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).State(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/State",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).State(ctx, req.(*StateRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateTaskRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).Create(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Create",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).Create(ctx, req.(*CreateTaskRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_Start_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(StartRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).Start(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Start",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).Start(ctx, req.(*StartRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(google_protobuf1.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).Delete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Delete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).Delete(ctx, req.(*google_protobuf1.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_DeleteProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteProcessRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).DeleteProcess(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/DeleteProcess",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).DeleteProcess(ctx, req.(*DeleteProcessRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_ListPids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListPidsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).ListPids(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/ListPids",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).ListPids(ctx, req.(*ListPidsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_Pause_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(google_protobuf1.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).Pause(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Pause",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).Pause(ctx, req.(*google_protobuf1.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_Resume_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(google_protobuf1.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).Resume(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Resume",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).Resume(ctx, req.(*google_protobuf1.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_Checkpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CheckpointTaskRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).Checkpoint(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Checkpoint",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).Checkpoint(ctx, req.(*CheckpointTaskRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_Kill_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(KillRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).Kill(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Kill",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).Kill(ctx, req.(*KillRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_Exec_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ExecProcessRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).Exec(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Exec",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).Exec(ctx, req.(*ExecProcessRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_ResizePty_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ResizePtyRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).ResizePty(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/ResizePty",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).ResizePty(ctx, req.(*ResizePtyRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_CloseIO_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CloseIORequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).CloseIO(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/CloseIO",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).CloseIO(ctx, req.(*CloseIORequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_ShimInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(google_protobuf1.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).ShimInfo(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/ShimInfo",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).ShimInfo(ctx, req.(*google_protobuf1.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateTaskRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).Update(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Update",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).Update(ctx, req.(*UpdateTaskRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Shim_Wait_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(WaitRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ShimServer).Wait(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/containerd.runtime.linux.shim.v1.Shim/Wait",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ShimServer).Wait(ctx, req.(*WaitRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Shim_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "containerd.runtime.linux.shim.v1.Shim",
+	HandlerType: (*ShimServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "State",
+			Handler:    _Shim_State_Handler,
+		},
+		{
+			MethodName: "Create",
+			Handler:    _Shim_Create_Handler,
+		},
+		{
+			MethodName: "Start",
+			Handler:    _Shim_Start_Handler,
+		},
+		{
+			MethodName: "Delete",
+			Handler:    _Shim_Delete_Handler,
+		},
+		{
+			MethodName: "DeleteProcess",
+			Handler:    _Shim_DeleteProcess_Handler,
+		},
+		{
+			MethodName: "ListPids",
+			Handler:    _Shim_ListPids_Handler,
+		},
+		{
+			MethodName: "Pause",
+			Handler:    _Shim_Pause_Handler,
+		},
+		{
+			MethodName: "Resume",
+			Handler:    _Shim_Resume_Handler,
+		},
+		{
+			MethodName: "Checkpoint",
+			Handler:    _Shim_Checkpoint_Handler,
+		},
+		{
+			MethodName: "Kill",
+			Handler:    _Shim_Kill_Handler,
+		},
+		{
+			MethodName: "Exec",
+			Handler:    _Shim_Exec_Handler,
+		},
+		{
+			MethodName: "ResizePty",
+			Handler:    _Shim_ResizePty_Handler,
+		},
+		{
+			MethodName: "CloseIO",
+			Handler:    _Shim_CloseIO_Handler,
+		},
+		{
+			MethodName: "ShimInfo",
+			Handler:    _Shim_ShimInfo_Handler,
+		},
+		{
+			MethodName: "Update",
+			Handler:    _Shim_Update_Handler,
+		},
+		{
+			MethodName: "Wait",
+			Handler:    _Shim_Wait_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "github.com/containerd/containerd/linux/shim/v1/shim.proto",
+}
+
+func (m *CreateTaskRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateTaskRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if len(m.Bundle) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle)))
+		i += copy(dAtA[i:], m.Bundle)
+	}
+	if len(m.Runtime) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Runtime)))
+		i += copy(dAtA[i:], m.Runtime)
+	}
+	if len(m.Rootfs) > 0 {
+		for _, msg := range m.Rootfs {
+			dAtA[i] = 0x22
+			i++
+			i = encodeVarintShim(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	if m.Terminal {
+		dAtA[i] = 0x28
+		i++
+		if m.Terminal {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.Stdin) > 0 {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
+		i += copy(dAtA[i:], m.Stdin)
+	}
+	if len(m.Stdout) > 0 {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
+		i += copy(dAtA[i:], m.Stdout)
+	}
+	if len(m.Stderr) > 0 {
+		dAtA[i] = 0x42
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
+		i += copy(dAtA[i:], m.Stderr)
+	}
+	if len(m.Checkpoint) > 0 {
+		dAtA[i] = 0x4a
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Checkpoint)))
+		i += copy(dAtA[i:], m.Checkpoint)
+	}
+	if len(m.ParentCheckpoint) > 0 {
+		dAtA[i] = 0x52
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ParentCheckpoint)))
+		i += copy(dAtA[i:], m.ParentCheckpoint)
+	}
+	if m.Options != nil {
+		dAtA[i] = 0x5a
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.Options.Size()))
+		n1, err := m.Options.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n1
+	}
+	return i, nil
+}
+
+func (m *CreateTaskResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateTaskResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Pid != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+	}
+	return i, nil
+}
+
+func (m *DeleteResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Pid != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+	}
+	if m.ExitStatus != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+	}
+	dAtA[i] = 0x1a
+	i++
+	i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n2
+	return i, nil
+}
+
+func (m *DeleteProcessRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteProcessRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	return i, nil
+}
+
+func (m *ExecProcessRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExecProcessRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if m.Terminal {
+		dAtA[i] = 0x10
+		i++
+		if m.Terminal {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if len(m.Stdin) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
+		i += copy(dAtA[i:], m.Stdin)
+	}
+	if len(m.Stdout) > 0 {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
+		i += copy(dAtA[i:], m.Stdout)
+	}
+	if len(m.Stderr) > 0 {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
+		i += copy(dAtA[i:], m.Stderr)
+	}
+	if m.Spec != nil {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.Spec.Size()))
+		n3, err := m.Spec.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n3
+	}
+	return i, nil
+}
+
+func (m *ExecProcessResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ExecProcessResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	return i, nil
+}
+
+func (m *ResizePtyRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResizePtyRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if m.Width != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.Width))
+	}
+	if m.Height != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.Height))
+	}
+	return i, nil
+}
+
+func (m *StateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StateRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	return i, nil
+}
+
+func (m *StateResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StateResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if len(m.Bundle) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Bundle)))
+		i += copy(dAtA[i:], m.Bundle)
+	}
+	if m.Pid != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+	}
+	if m.Status != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.Status))
+	}
+	if len(m.Stdin) > 0 {
+		dAtA[i] = 0x2a
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Stdin)))
+		i += copy(dAtA[i:], m.Stdin)
+	}
+	if len(m.Stdout) > 0 {
+		dAtA[i] = 0x32
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Stdout)))
+		i += copy(dAtA[i:], m.Stdout)
+	}
+	if len(m.Stderr) > 0 {
+		dAtA[i] = 0x3a
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Stderr)))
+		i += copy(dAtA[i:], m.Stderr)
+	}
+	if m.Terminal {
+		dAtA[i] = 0x40
+		i++
+		if m.Terminal {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	if m.ExitStatus != 0 {
+		dAtA[i] = 0x48
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+	}
+	dAtA[i] = 0x52
+	i++
+	i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n4, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n4
+	return i, nil
+}
+
+func (m *KillRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *KillRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if m.Signal != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.Signal))
+	}
+	if m.All {
+		dAtA[i] = 0x18
+		i++
+		if m.All {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	return i, nil
+}
+
+func (m *CloseIORequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CloseIORequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if m.Stdin {
+		dAtA[i] = 0x10
+		i++
+		if m.Stdin {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i++
+	}
+	return i, nil
+}
+
+func (m *ListPidsRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListPidsRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	return i, nil
+}
+
+func (m *ListPidsResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ListPidsResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Processes) > 0 {
+		for _, msg := range m.Processes {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintShim(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *CheckpointTaskRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CheckpointTaskRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Path) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.Path)))
+		i += copy(dAtA[i:], m.Path)
+	}
+	if m.Options != nil {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.Options.Size()))
+		n5, err := m.Options.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n5
+	}
+	return i, nil
+}
+
+func (m *ShimInfoResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ShimInfoResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.ShimPid != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.ShimPid))
+	}
+	return i, nil
+}
+
+func (m *UpdateTaskRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Resources != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.Resources.Size()))
+		n6, err := m.Resources.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n6
+	}
+	return i, nil
+}
+
+func (m *StartRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StartRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	return i, nil
+}
+
+func (m *StartResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StartResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	if m.Pid != 0 {
+		dAtA[i] = 0x10
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.Pid))
+	}
+	return i, nil
+}
+
+func (m *WaitRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WaitRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(len(m.ID)))
+		i += copy(dAtA[i:], m.ID)
+	}
+	return i, nil
+}
+
+func (m *WaitResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WaitResponse) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.ExitStatus != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintShim(dAtA, i, uint64(m.ExitStatus))
+	}
+	dAtA[i] = 0x12
+	i++
+	i = encodeVarintShim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
+	n7, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n7
+	return i, nil
+}
+
+func encodeFixed64Shim(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Shim(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintShim(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *CreateTaskRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	l = len(m.Bundle)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	l = len(m.Runtime)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	if len(m.Rootfs) > 0 {
+		for _, e := range m.Rootfs {
+			l = e.Size()
+			n += 1 + l + sovShim(uint64(l))
+		}
+	}
+	if m.Terminal {
+		n += 2
+	}
+	l = len(m.Stdin)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	l = len(m.Stdout)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	l = len(m.Stderr)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	l = len(m.Checkpoint)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	l = len(m.ParentCheckpoint)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	if m.Options != nil {
+		l = m.Options.Size()
+		n += 1 + l + sovShim(uint64(l))
+	}
+	return n
+}
+
+func (m *CreateTaskResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.Pid != 0 {
+		n += 1 + sovShim(uint64(m.Pid))
+	}
+	return n
+}
+
+func (m *DeleteResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.Pid != 0 {
+		n += 1 + sovShim(uint64(m.Pid))
+	}
+	if m.ExitStatus != 0 {
+		n += 1 + sovShim(uint64(m.ExitStatus))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
+	n += 1 + l + sovShim(uint64(l))
+	return n
+}
+
+func (m *DeleteProcessRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	return n
+}
+
+func (m *ExecProcessRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	if m.Terminal {
+		n += 2
+	}
+	l = len(m.Stdin)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	l = len(m.Stdout)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	l = len(m.Stderr)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	if m.Spec != nil {
+		l = m.Spec.Size()
+		n += 1 + l + sovShim(uint64(l))
+	}
+	return n
+}
+
+func (m *ExecProcessResponse) Size() (n int) {
+	var l int
+	_ = l
+	return n
+}
+
+func (m *ResizePtyRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	if m.Width != 0 {
+		n += 1 + sovShim(uint64(m.Width))
+	}
+	if m.Height != 0 {
+		n += 1 + sovShim(uint64(m.Height))
+	}
+	return n
+}
+
+func (m *StateRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	return n
+}
+
+func (m *StateResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	l = len(m.Bundle)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	if m.Pid != 0 {
+		n += 1 + sovShim(uint64(m.Pid))
+	}
+	if m.Status != 0 {
+		n += 1 + sovShim(uint64(m.Status))
+	}
+	l = len(m.Stdin)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	l = len(m.Stdout)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	l = len(m.Stderr)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	if m.Terminal {
+		n += 2
+	}
+	if m.ExitStatus != 0 {
+		n += 1 + sovShim(uint64(m.ExitStatus))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
+	n += 1 + l + sovShim(uint64(l))
+	return n
+}
+
+func (m *KillRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	if m.Signal != 0 {
+		n += 1 + sovShim(uint64(m.Signal))
+	}
+	if m.All {
+		n += 2
+	}
+	return n
+}
+
+func (m *CloseIORequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	if m.Stdin {
+		n += 2
+	}
+	return n
+}
+
+func (m *ListPidsRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	return n
+}
+
+func (m *ListPidsResponse) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Processes) > 0 {
+		for _, e := range m.Processes {
+			l = e.Size()
+			n += 1 + l + sovShim(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *CheckpointTaskRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Path)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	if m.Options != nil {
+		l = m.Options.Size()
+		n += 1 + l + sovShim(uint64(l))
+	}
+	return n
+}
+
+func (m *ShimInfoResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.ShimPid != 0 {
+		n += 1 + sovShim(uint64(m.ShimPid))
+	}
+	return n
+}
+
+func (m *UpdateTaskRequest) Size() (n int) {
+	var l int
+	_ = l
+	if m.Resources != nil {
+		l = m.Resources.Size()
+		n += 1 + l + sovShim(uint64(l))
+	}
+	return n
+}
+
+func (m *StartRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	return n
+}
+
+func (m *StartResponse) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	if m.Pid != 0 {
+		n += 1 + sovShim(uint64(m.Pid))
+	}
+	return n
+}
+
+func (m *WaitRequest) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ID)
+	if l > 0 {
+		n += 1 + l + sovShim(uint64(l))
+	}
+	return n
+}
+
+func (m *WaitResponse) Size() (n int) {
+	var l int
+	_ = l
+	if m.ExitStatus != 0 {
+		n += 1 + sovShim(uint64(m.ExitStatus))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
+	n += 1 + l + sovShim(uint64(l))
+	return n
+}
+
+func sovShim(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozShim(x uint64) (n int) {
+	return sovShim(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *CreateTaskRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateTaskRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`,
+		`Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`,
+		`Rootfs:` + strings.Replace(fmt.Sprintf("%v", this.Rootfs), "Mount", "containerd_types.Mount", 1) + `,`,
+		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
+		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
+		`Checkpoint:` + fmt.Sprintf("%v", this.Checkpoint) + `,`,
+		`ParentCheckpoint:` + fmt.Sprintf("%v", this.ParentCheckpoint) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CreateTaskResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateTaskResponse{`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeleteResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeleteResponse{`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DeleteProcessRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DeleteProcessRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ExecProcessRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ExecProcessRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
+		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
+		`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ExecProcessResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ExecProcessResponse{`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResizePtyRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResizePtyRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Width:` + fmt.Sprintf("%v", this.Width) + `,`,
+		`Height:` + fmt.Sprintf("%v", this.Height) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StateRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StateRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StateResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StateResponse{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Bundle:` + fmt.Sprintf("%v", this.Bundle) + `,`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`Status:` + fmt.Sprintf("%v", this.Status) + `,`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
+		`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
+		`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
+		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *KillRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&KillRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Signal:` + fmt.Sprintf("%v", this.Signal) + `,`,
+		`All:` + fmt.Sprintf("%v", this.All) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CloseIORequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CloseIORequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListPidsRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListPidsRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ListPidsResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ListPidsResponse{`,
+		`Processes:` + strings.Replace(fmt.Sprintf("%v", this.Processes), "ProcessInfo", "containerd_v1_types.ProcessInfo", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *CheckpointTaskRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CheckpointTaskRequest{`,
+		`Path:` + fmt.Sprintf("%v", this.Path) + `,`,
+		`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ShimInfoResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ShimInfoResponse{`,
+		`ShimPid:` + fmt.Sprintf("%v", this.ShimPid) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *UpdateTaskRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&UpdateTaskRequest{`,
+		`Resources:` + strings.Replace(fmt.Sprintf("%v", this.Resources), "Any", "google_protobuf.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StartRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StartRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *StartResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&StartResponse{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`Pid:` + fmt.Sprintf("%v", this.Pid) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WaitRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WaitRequest{`,
+		`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *WaitResponse) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&WaitResponse{`,
+		`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
+		`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf3.Timestamp", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringShim(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CreateTaskRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CreateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Bundle = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Runtime = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Rootfs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Rootfs = append(m.Rootfs, &containerd_types.Mount{})
+			if err := m.Rootfs[len(m.Rootfs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Terminal = bool(v != 0)
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdin = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdout = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stderr = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Checkpoint", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Checkpoint = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ParentCheckpoint", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ParentCheckpoint = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Options == nil {
+				m.Options = &google_protobuf.Any{}
+			}
+			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CreateTaskResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CreateTaskResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CreateTaskResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType)
+			}
+			m.ExitStatus = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteProcessRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteProcessRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExecProcessRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExecProcessRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExecProcessRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Terminal = bool(v != 0)
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdin = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdout = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stderr = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Spec == nil {
+				m.Spec = &google_protobuf.Any{}
+			}
+			if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ExecProcessResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ExecProcessResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ExecProcessResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResizePtyRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResizePtyRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResizePtyRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Width", wireType)
+			}
+			m.Width = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Width |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType)
+			}
+			m.Height = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Height |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StateResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StateResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Bundle", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Bundle = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
+			}
+			m.Status = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Status |= (containerd_v1_types.Status(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdin = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stdout = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Stderr = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Terminal", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Terminal = bool(v != 0)
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType)
+			}
+			m.ExitStatus = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *KillRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: KillRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: KillRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType)
+			}
+			m.Signal = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Signal |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field All", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.All = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CloseIORequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CloseIORequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CloseIORequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stdin = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListPidsRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListPidsRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListPidsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ListPidsResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ListPidsResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ListPidsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Processes", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Processes = append(m.Processes, &containerd_v1_types.ProcessInfo{})
+			if err := m.Processes[len(m.Processes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CheckpointTaskRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CheckpointTaskRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CheckpointTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Options == nil {
+				m.Options = &google_protobuf.Any{}
+			}
+			if err := m.Options.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ShimInfoResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ShimInfoResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ShimInfoResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ShimPid", wireType)
+			}
+			m.ShimPid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ShimPid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: UpdateTaskRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: UpdateTaskRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Resources == nil {
+				m.Resources = &google_protobuf.Any{}
+			}
+			if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StartRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StartRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StartRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StartResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StartResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StartResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Pid", wireType)
+			}
+			m.Pid = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Pid |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WaitRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WaitRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WaitRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WaitResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WaitResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WaitResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitStatus", wireType)
+			}
+			m.ExitStatus = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ExitStatus |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthShim
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipShim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthShim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipShim(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowShim
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowShim
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthShim
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowShim
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipShim(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthShim = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowShim   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/linux/shim/v1/shim.proto", fileDescriptorShim)
+}
+
+var fileDescriptorShim = []byte{
+	// 1131 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0x4f, 0x4f, 0x1b, 0x47,
+	0x14, 0x67, 0x8d, 0xff, 0x3e, 0xc7, 0x14, 0xa6, 0x84, 0x6e, 0x1c, 0xc9, 0x58, 0x2b, 0x35, 0xa2,
+	0xaa, 0xb2, 0x2e, 0xa6, 0x4a, 0x9a, 0x56, 0x8a, 0x04, 0x24, 0xaa, 0x50, 0x8b, 0x82, 0x16, 0xd2,
+	0x54, 0xad, 0x2a, 0xb4, 0x78, 0x07, 0x7b, 0x84, 0xbd, 0xb3, 0xd9, 0x99, 0xa5, 0xd0, 0x53, 0x4f,
+	0x3d, 0xf7, 0xe3, 0xf4, 0x23, 0x70, 0xc8, 0xa1, 0xc7, 0x9e, 0xd2, 0xc6, 0xf7, 0x7e, 0x87, 0x68,
+	0xfe, 0x18, 0xaf, 0x6d, 0x36, 0xbb, 0xe6, 0x82, 0xf7, 0xcd, 0xfc, 0xde, 0xcc, 0x9b, 0xf7, 0xfb,
+	0xcd, 0x7b, 0x03, 0x3c, 0xe9, 0x12, 0xde, 0x8b, 0x4e, 0xec, 0x0e, 0x1d, 0xb4, 0x3a, 0xd4, 0xe7,
+	0x2e, 0xf1, 0x71, 0xe8, 0xc5, 0x3f, 0xfb, 0xc4, 0x8f, 0x2e, 0x5a, 0xac, 0x47, 0x06, 0xad, 0xf3,
+	0x4d, 0xf9, 0x6b, 0x07, 0x21, 0xe5, 0x14, 0x35, 0xc7, 0x20, 0x3b, 0x8c, 0x7c, 0x4e, 0x06, 0xd8,
+	0x96, 0x60, 0x5b, 0x82, 0xce, 0x37, 0xeb, 0xf7, 0xba, 0x94, 0x76, 0xfb, 0xb8, 0x25, 0xf1, 0x27,
+	0xd1, 0x69, 0xcb, 0xf5, 0x2f, 0x95, 0x73, 0xfd, 0xfe, 0xf4, 0x14, 0x1e, 0x04, 0x7c, 0x34, 0xb9,
+	0xda, 0xa5, 0x5d, 0x2a, 0x3f, 0x5b, 0xe2, 0x4b, 0x8f, 0xae, 0x4f, 0xbb, 0x88, 0x1d, 0x19, 0x77,
+	0x07, 0x81, 0x06, 0x3c, 0x4a, 0x3d, 0x8b, 0x1b, 0x90, 0x16, 0xbf, 0x0c, 0x30, 0x6b, 0x0d, 0x68,
+	0xe4, 0x73, 0xed, 0xf7, 0xf5, 0x1c, 0x7e, 0xdc, 0x65, 0x67, 0xf2, 0x8f, 0xf2, 0xb5, 0xfe, 0xcf,
+	0xc1, 0xca, 0x6e, 0x88, 0x5d, 0x8e, 0x8f, 0x5c, 0x76, 0xe6, 0xe0, 0xd7, 0x11, 0x66, 0x1c, 0xad,
+	0x41, 0x8e, 0x78, 0xa6, 0xd1, 0x34, 0x36, 0x2a, 0x3b, 0xc5, 0xe1, 0xdb, 0xf5, 0xdc, 0xde, 0x33,
+	0x27, 0x47, 0x3c, 0xb4, 0x06, 0xc5, 0x93, 0xc8, 0xf7, 0xfa, 0xd8, 0xcc, 0x89, 0x39, 0x47, 0x5b,
+	0xc8, 0x84, 0x92, 0xce, 0xa0, 0xb9, 0x28, 0x27, 0x46, 0x26, 0x6a, 0x41, 0x31, 0xa4, 0x94, 0x9f,
+	0x32, 0x33, 0xdf, 0x5c, 0xdc, 0xa8, 0xb6, 0x3f, 0xb1, 0x63, 0x59, 0x97, 0x21, 0xd9, 0xfb, 0xe2,
+	0x28, 0x8e, 0x86, 0xa1, 0x3a, 0x94, 0x39, 0x0e, 0x07, 0xc4, 0x77, 0xfb, 0x66, 0xa1, 0x69, 0x6c,
+	0x94, 0x9d, 0x6b, 0x1b, 0xad, 0x42, 0x81, 0x71, 0x8f, 0xf8, 0x66, 0x51, 0x6e, 0xa2, 0x0c, 0x11,
+	0x14, 0xe3, 0x1e, 0x8d, 0xb8, 0x59, 0x52, 0x41, 0x29, 0x4b, 0x8f, 0xe3, 0x30, 0x34, 0xcb, 0xd7,
+	0xe3, 0x38, 0x0c, 0x51, 0x03, 0xa0, 0xd3, 0xc3, 0x9d, 0xb3, 0x80, 0x12, 0x9f, 0x9b, 0x15, 0x39,
+	0x17, 0x1b, 0x41, 0x9f, 0xc3, 0x4a, 0xe0, 0x86, 0xd8, 0xe7, 0xc7, 0x31, 0x18, 0x48, 0xd8, 0xb2,
+	0x9a, 0xd8, 0x1d, 0x83, 0x6d, 0x28, 0xd1, 0x80, 0x13, 0xea, 0x33, 0xb3, 0xda, 0x34, 0x36, 0xaa,
+	0xed, 0x55, 0x5b, 0xd1, 0x6c, 0x8f, 0x68, 0xb6, 0xb7, 0xfd, 0x4b, 0x67, 0x04, 0xb2, 0x1e, 0x00,
+	0x8a, 0xa7, 0x9b, 0x05, 0xd4, 0x67, 0x18, 0x2d, 0xc3, 0x62, 0xa0, 0x13, 0x5e, 0x73, 0xc4, 0xa7,
+	0xf5, 0x87, 0x01, 0x4b, 0xcf, 0x70, 0x1f, 0x73, 0x9c, 0x0c, 0x42, 0xeb, 0x50, 0xc5, 0x17, 0x84,
+	0x1f, 0x33, 0xee, 0xf2, 0x88, 0x49, 0x4e, 0x6a, 0x0e, 0x88, 0xa1, 0x43, 0x39, 0x82, 0xb6, 0xa1,
+	0x22, 0x2c, 0xec, 0x1d, 0xbb, 0x5c, 0x32, 0x53, 0x6d, 0xd7, 0x67, 0xe2, 0x3b, 0x1a, 0xc9, 0x70,
+	0xa7, 0x7c, 0xf5, 0x76, 0x7d, 0xe1, 0xcf, 0x7f, 0xd7, 0x0d, 0xa7, 0xac, 0xdc, 0xb6, 0xb9, 0x65,
+	0xc3, 0xaa, 0x8a, 0xe3, 0x20, 0xa4, 0x1d, 0xcc, 0x58, 0x8a, 0x44, 0xac, 0xbf, 0x0c, 0x40, 0xcf,
+	0x2f, 0x70, 0x27, 0x1b, 0x7c, 0x82, 0xee, 0x5c, 0x12, 0xdd, 0x8b, 0x37, 0xd3, 0x9d, 0x4f, 0xa0,
+	0xbb, 0x30, 0x41, 0xf7, 0x06, 0xe4, 0x59, 0x80, 0x3b, 0x52, 0x33, 0x49, 0xf4, 0x48, 0x84, 0x75,
+	0x17, 0x3e, 0x9e, 0x88, 0x5c, 0xe5, 0xdd, 0xfa, 0x11, 0x96, 0x1d, 0xcc, 0xc8, 0x6f, 0xf8, 0x80,
+	0x5f, 0xa6, 0x1d, 0x67, 0x15, 0x0a, 0xbf, 0x12, 0x8f, 0xf7, 0x34, 0x17, 0xca, 0x10, 0xa1, 0xf5,
+	0x30, 0xe9, 0xf6, 0x14, 0x07, 0x35, 0x47, 0x5b, 0xd6, 0x03, 0xb8, 0x23, 0x88, 0xc2, 0x69, 0x39,
+	0x7d, 0x93, 0x83, 0x9a, 0x06, 0x6a, 0x2d, 0xcc, 0x7b, 0x41, 0xb5, 0x76, 0x16, 0xc7, 0xda, 0xd9,
+	0x12, 0xe9, 0x92, 0xb2, 0x11, 0x69, 0x5c, 0x6a, 0xdf, 0x8f, 0x5f, 0xcc, 0xf3, 0x4d, 0x7d, 0x37,
+	0x95, 0x8e, 0x1c, 0x0d, 0x1d, 0x33, 0x52, 0xb8, 0x99, 0x91, 0x62, 0x02, 0x23, 0xa5, 0x09, 0x46,
+	0xe2, 0x9c, 0x97, 0xa7, 0x38, 0x9f, 0x92, 0x74, 0xe5, 0xc3, 0x92, 0x86, 0x5b, 0x49, 0xfa, 0x05,
+	0x54, 0xbf, 0x23, 0xfd, 0x7e, 0x86, 0x62, 0xc7, 0x48, 0x77, 0x24, 0xcc, 0x9a, 0xa3, 0x2d, 0x91,
+	0x4b, 0xb7, 0xdf, 0x97, 0xb9, 0x2c, 0x3b, 0xe2, 0xd3, 0x7a, 0x0a, 0x4b, 0xbb, 0x7d, 0xca, 0xf0,
+	0xde, 0x8b, 0x0c, 0xfa, 0x50, 0x09, 0x54, 0x5a, 0x57, 0x86, 0xf5, 0x19, 0x7c, 0xf4, 0x3d, 0x61,
+	0xfc, 0x80, 0x78, 0xa9, 0xd7, 0xcb, 0x81, 0xe5, 0x31, 0x54, 0x8b, 0xe1, 0x29, 0x54, 0x02, 0xa5,
+	0x59, 0xcc, 0x4c, 0x43, 0x96, 0xd9, 0xe6, 0x8d, 0x6c, 0x6a, 0x65, 0xef, 0xf9, 0xa7, 0xd4, 0x19,
+	0xbb, 0x58, 0x3f, 0xc3, 0xdd, 0x71, 0x45, 0x8b, 0xb7, 0x01, 0x04, 0xf9, 0xc0, 0xe5, 0x3d, 0x15,
+	0x86, 0x23, 0xbf, 0xe3, 0x05, 0x2f, 0x97, 0xa5, 0xe0, 0x3d, 0x84, 0xe5, 0xc3, 0x1e, 0x19, 0xc8,
+	0x3d, 0x47, 0x01, 0xdf, 0x83, 0xb2, 0x68, 0xb1, 0xc7, 0xe3, 0x72, 0x56, 0x12, 0xf6, 0x01, 0xf1,
+	0xac, 0x6f, 0x61, 0xe5, 0x65, 0xe0, 0x4d, 0xb5, 0xa3, 0x36, 0x54, 0x42, 0xcc, 0x68, 0x14, 0x76,
+	0xe4, 0x01, 0x93, 0x77, 0x1d, 0xc3, 0xf4, 0xdd, 0x0a, 0x79, 0x5a, 0x42, 0x9f, 0xc8, 0xab, 0x25,
+	0x70, 0x29, 0x57, 0x4b, 0x5f, 0xa1, 0xdc, 0xb8, 0x46, 0x7f, 0x0a, 0xd5, 0x57, 0x2e, 0x49, 0xdd,
+	0x21, 0x84, 0x3b, 0x0a, 0xa6, 0x37, 0x98, 0x92, 0xb8, 0xf1, 0x61, 0x89, 0xe7, 0x6e, 0x23, 0xf1,
+	0xf6, 0x9b, 0x2a, 0xe4, 0x45, 0xda, 0x51, 0x0f, 0x0a, 0xb2, 0x72, 0x20, 0xdb, 0x4e, 0x7b, 0xee,
+	0xd8, 0xf1, 0x5a, 0x54, 0x6f, 0x65, 0xc6, 0xeb, 0x63, 0x31, 0x28, 0xaa, 0xce, 0x86, 0xb6, 0xd2,
+	0x5d, 0x67, 0x9e, 0x1c, 0xf5, 0x2f, 0xe7, 0x73, 0xd2, 0x9b, 0xaa, 0xe3, 0x85, 0x3c, 0xe3, 0xf1,
+	0xae, 0xe5, 0x90, 0xf1, 0x78, 0x31, 0x59, 0x38, 0x50, 0x54, 0x7d, 0x10, 0xad, 0xcd, 0x70, 0xf1,
+	0x5c, 0xbc, 0xfd, 0xea, 0x5f, 0xa4, 0x2f, 0x39, 0xd5, 0xd1, 0x2f, 0xa1, 0x36, 0xd1, 0x5b, 0xd1,
+	0xa3, 0xac, 0x4b, 0x4c, 0x76, 0xd7, 0x5b, 0x6c, 0xfd, 0x1a, 0xca, 0xa3, 0x3a, 0x82, 0x36, 0xd3,
+	0xbd, 0xa7, 0xca, 0x53, 0xbd, 0x3d, 0x8f, 0x8b, 0xde, 0xf2, 0x31, 0x14, 0x0e, 0xdc, 0x88, 0x25,
+	0x27, 0x30, 0x61, 0x1c, 0x7d, 0x05, 0x45, 0x07, 0xb3, 0x68, 0x30, 0xbf, 0xe7, 0x2f, 0x00, 0xb1,
+	0xb7, 0xda, 0xe3, 0x0c, 0x12, 0xbb, 0xa9, 0x0e, 0x26, 0x2e, 0xbf, 0x0f, 0x79, 0xd1, 0x48, 0xd0,
+	0xc3, 0xf4, 0x85, 0x63, 0x0d, 0x27, 0x71, 0xb9, 0x23, 0xc8, 0x8b, 0xf7, 0x07, 0xca, 0x70, 0x15,
+	0x66, 0x5f, 0x58, 0x89, 0xab, 0xbe, 0x82, 0xca, 0xf5, 0xf3, 0x05, 0x65, 0xe0, 0x6d, 0xfa, 0xad,
+	0x93, 0xb8, 0xf0, 0x21, 0x94, 0x74, 0xd7, 0x43, 0x19, 0xf4, 0x37, 0xd9, 0x20, 0x13, 0x17, 0xfd,
+	0x01, 0xca, 0xa3, 0x76, 0x91, 0xc8, 0x76, 0x86, 0x43, 0xcc, 0xb4, 0x9c, 0x97, 0x50, 0x54, 0x7d,
+	0x25, 0x4b, 0x75, 0x9a, 0xe9, 0x40, 0x89, 0xe1, 0x62, 0xc8, 0x8b, 0xda, 0x9e, 0x45, 0x01, 0xb1,
+	0x56, 0x51, 0xb7, 0xb3, 0xc2, 0x55, 0xf4, 0x3b, 0xfb, 0x57, 0xef, 0x1a, 0x0b, 0xff, 0xbc, 0x6b,
+	0x2c, 0xfc, 0x3e, 0x6c, 0x18, 0x57, 0xc3, 0x86, 0xf1, 0xf7, 0xb0, 0x61, 0xfc, 0x37, 0x6c, 0x18,
+	0x3f, 0x6d, 0xcd, 0xf7, 0xff, 0xef, 0x37, 0xe2, 0xf7, 0xa4, 0x28, 0x4f, 0xb1, 0xf5, 0x3e, 0x00,
+	0x00, 0xff, 0xff, 0xad, 0x60, 0x08, 0x28, 0x3d, 0x0f, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto
new file mode 100644
index 0000000..8d8af95
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/shim/v1/shim.proto
@@ -0,0 +1,165 @@
+syntax = "proto3";
+
+package containerd.runtime.linux.shim.v1;
+
+import "google/protobuf/any.proto";
+import "google/protobuf/empty.proto";
+import "gogoproto/gogo.proto";
+import "google/protobuf/timestamp.proto";
+import "github.com/containerd/containerd/api/types/mount.proto";
+import "github.com/containerd/containerd/api/types/task/task.proto";
+
+option go_package = "github.com/containerd/containerd/linux/shim/v1;shim";
+
+// Shim service is launched for each container and is responsible for owning the IO
+// for the container and its additional processes.  The shim is also the parent of
+// each container and allows reattaching to the IO and receiving the exit status
+// for the container processes.
+service Shim {
+	// State returns shim and task state information.
+	rpc State(StateRequest) returns (StateResponse);
+
+	rpc Create(CreateTaskRequest) returns (CreateTaskResponse);
+
+	rpc Start(StartRequest) returns (StartResponse);
+
+	rpc Delete(google.protobuf.Empty) returns (DeleteResponse);
+
+	rpc DeleteProcess(DeleteProcessRequest) returns (DeleteResponse);
+
+	rpc ListPids(ListPidsRequest) returns (ListPidsResponse);
+
+	rpc Pause(google.protobuf.Empty) returns (google.protobuf.Empty);
+
+	rpc Resume(google.protobuf.Empty) returns (google.protobuf.Empty);
+
+	rpc Checkpoint(CheckpointTaskRequest) returns (google.protobuf.Empty);
+
+	rpc Kill(KillRequest) returns (google.protobuf.Empty);
+
+	rpc Exec(ExecProcessRequest) returns (google.protobuf.Empty);
+
+	rpc ResizePty(ResizePtyRequest) returns (google.protobuf.Empty);
+
+	rpc CloseIO(CloseIORequest) returns (google.protobuf.Empty);
+
+	// ShimInfo returns information about the shim.
+	rpc ShimInfo(google.protobuf.Empty) returns (ShimInfoResponse);
+
+	rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);
+
+	rpc Wait(WaitRequest) returns (WaitResponse);
+}
+
+message CreateTaskRequest {
+	string id = 1;
+	string bundle = 2;
+	string runtime = 3;
+	repeated containerd.types.Mount rootfs = 4;
+	bool terminal = 5;
+	string stdin = 6;
+	string stdout = 7;
+	string stderr = 8;
+	string checkpoint = 9;
+	string parent_checkpoint = 10;
+	google.protobuf.Any options = 11;
+}
+
+message CreateTaskResponse {
+	uint32 pid = 1;
+}
+
+message DeleteResponse {
+	uint32 pid = 1;
+	uint32 exit_status = 2;
+	google.protobuf.Timestamp exited_at = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message DeleteProcessRequest {
+	string id = 1;
+}
+
+message ExecProcessRequest {
+	string id = 1;
+	bool terminal = 2;
+	string stdin = 3;
+	string stdout = 4;
+	string stderr = 5;
+	google.protobuf.Any spec = 6;
+}
+
+message ExecProcessResponse {
+}
+
+message ResizePtyRequest {
+	string id = 1;
+	uint32 width = 2;
+	uint32 height = 3;
+}
+
+message StateRequest {
+	string id = 1;
+}
+
+message StateResponse {
+	string id = 1;
+	string bundle = 2;
+	uint32 pid = 3;
+	containerd.v1.types.Status status = 4;
+	string stdin = 5;
+	string stdout = 6;
+	string stderr = 7;
+	bool terminal = 8;
+	uint32 exit_status = 9;
+	google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
+
+message KillRequest {
+	string id = 1;
+	uint32 signal = 2;
+	bool all = 3;
+}
+
+message CloseIORequest {
+	string id = 1;
+	bool stdin = 2;
+}
+
+message ListPidsRequest {
+	string id = 1;
+}
+
+message ListPidsResponse {
+	repeated containerd.v1.types.ProcessInfo processes = 1;
+}
+
+message CheckpointTaskRequest {
+	string path = 1;
+	google.protobuf.Any options = 2;
+}
+
+message ShimInfoResponse {
+	uint32 shim_pid = 1;
+}
+
+message UpdateTaskRequest {
+	google.protobuf.Any resources = 1;
+}
+
+message StartRequest {
+	string id = 1;
+}
+
+message StartResponse {
+	string id = 1;
+	uint32 pid = 2;
+}
+
+message WaitRequest {
+	string id = 1;
+}
+
+message WaitResponse {
+	uint32 exit_status = 1;
+	google.protobuf.Timestamp exited_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+}
diff --git a/vendor/github.com/containerd/containerd/linux/task.go b/vendor/github.com/containerd/containerd/linux/task.go
new file mode 100644
index 0000000..a851ee7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/linux/task.go
@@ -0,0 +1,294 @@
+// +build linux
+
+package linux
+
+import (
+	"context"
+
+	"github.com/pkg/errors"
+	"google.golang.org/grpc"
+
+	"github.com/containerd/cgroups"
+	"github.com/containerd/containerd/api/types/task"
+	"github.com/containerd/containerd/errdefs"
+	client "github.com/containerd/containerd/linux/shim"
+	shim "github.com/containerd/containerd/linux/shim/v1"
+	"github.com/containerd/containerd/runtime"
+	"github.com/gogo/protobuf/types"
+)
+
+// Task on a linux based system
+type Task struct {
+	id        string
+	pid       int
+	shim      *client.Client
+	namespace string
+	cg        cgroups.Cgroup
+	monitor   runtime.TaskMonitor
+}
+
+func newTask(id, namespace string, pid int, shim *client.Client, monitor runtime.TaskMonitor) (*Task, error) {
+	var (
+		err error
+		cg  cgroups.Cgroup
+	)
+	if pid > 0 {
+		cg, err = cgroups.Load(cgroups.V1, cgroups.PidPath(pid))
+		if err != nil && err != cgroups.ErrCgroupDeleted {
+			return nil, err
+		}
+	}
+	return &Task{
+		id:        id,
+		pid:       pid,
+		shim:      shim,
+		namespace: namespace,
+		cg:        cg,
+		monitor:   monitor,
+	}, nil
+}
+
+// ID of the task
+func (t *Task) ID() string {
+	return t.id
+}
+
+// Info returns task information about the runtime and namespace
+func (t *Task) Info() runtime.TaskInfo {
+	return runtime.TaskInfo{
+		ID:        t.id,
+		Runtime:   pluginID,
+		Namespace: t.namespace,
+	}
+}
+
+// Start the task
+func (t *Task) Start(ctx context.Context) error {
+	hasCgroup := t.cg != nil
+	r, err := t.shim.Start(ctx, &shim.StartRequest{
+		ID: t.id,
+	})
+	if err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	t.pid = int(r.Pid)
+	if !hasCgroup {
+		cg, err := cgroups.Load(cgroups.V1, cgroups.PidPath(t.pid))
+		if err != nil {
+			return err
+		}
+		t.cg = cg
+		if err := t.monitor.Monitor(t); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// State returns runtime information for the task
+func (t *Task) State(ctx context.Context) (runtime.State, error) {
+	response, err := t.shim.State(ctx, &shim.StateRequest{
+		ID: t.id,
+	})
+	if err != nil {
+		if err != grpc.ErrServerStopped {
+			return runtime.State{}, errdefs.FromGRPC(err)
+		}
+		return runtime.State{}, errdefs.ErrNotFound
+	}
+	var status runtime.Status
+	switch response.Status {
+	case task.StatusCreated:
+		status = runtime.CreatedStatus
+	case task.StatusRunning:
+		status = runtime.RunningStatus
+	case task.StatusStopped:
+		status = runtime.StoppedStatus
+	case task.StatusPaused:
+		status = runtime.PausedStatus
+	case task.StatusPausing:
+		status = runtime.PausingStatus
+	}
+	return runtime.State{
+		Pid:        response.Pid,
+		Status:     status,
+		Stdin:      response.Stdin,
+		Stdout:     response.Stdout,
+		Stderr:     response.Stderr,
+		Terminal:   response.Terminal,
+		ExitStatus: response.ExitStatus,
+		ExitedAt:   response.ExitedAt,
+	}, nil
+}
+
+// Pause the task and all processes
+func (t *Task) Pause(ctx context.Context) error {
+	_, err := t.shim.Pause(ctx, empty)
+	if err != nil {
+		err = errdefs.FromGRPC(err)
+	}
+	return err
+}
+
+// Resume the task and all processes
+func (t *Task) Resume(ctx context.Context) error {
+	if _, err := t.shim.Resume(ctx, empty); err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	return nil
+}
+
+// Kill the task using the provided signal
+//
+// Optionally send the signal to all processes that are a child of the task
+func (t *Task) Kill(ctx context.Context, signal uint32, all bool) error {
+	if _, err := t.shim.Kill(ctx, &shim.KillRequest{
+		ID:     t.id,
+		Signal: signal,
+		All:    all,
+	}); err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	return nil
+}
+
+// Exec creates a new process inside the task
+func (t *Task) Exec(ctx context.Context, id string, opts runtime.ExecOpts) (runtime.Process, error) {
+	request := &shim.ExecProcessRequest{
+		ID:       id,
+		Stdin:    opts.IO.Stdin,
+		Stdout:   opts.IO.Stdout,
+		Stderr:   opts.IO.Stderr,
+		Terminal: opts.IO.Terminal,
+		Spec:     opts.Spec,
+	}
+	if _, err := t.shim.Exec(ctx, request); err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	return &Process{
+		id: id,
+		t:  t,
+	}, nil
+}
+
+// Pids returns all system level process ids running inside the task
+func (t *Task) Pids(ctx context.Context) ([]runtime.ProcessInfo, error) {
+	resp, err := t.shim.ListPids(ctx, &shim.ListPidsRequest{
+		ID: t.id,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	var processList []runtime.ProcessInfo
+	for _, p := range resp.Processes {
+		processList = append(processList, runtime.ProcessInfo{
+			Pid: p.Pid,
+		})
+	}
+	return processList, nil
+}
+
+// ResizePty changes the side of the task's PTY to the provided width and height
+func (t *Task) ResizePty(ctx context.Context, size runtime.ConsoleSize) error {
+	_, err := t.shim.ResizePty(ctx, &shim.ResizePtyRequest{
+		ID:     t.id,
+		Width:  size.Width,
+		Height: size.Height,
+	})
+	if err != nil {
+		err = errdefs.FromGRPC(err)
+	}
+	return err
+}
+
+// CloseIO closes the provided IO on the task
+func (t *Task) CloseIO(ctx context.Context) error {
+	_, err := t.shim.CloseIO(ctx, &shim.CloseIORequest{
+		ID:    t.id,
+		Stdin: true,
+	})
+	if err != nil {
+		err = errdefs.FromGRPC(err)
+	}
+	return err
+}
+
+// Checkpoint creates a system level dump of the task and process information that can be later restored
+func (t *Task) Checkpoint(ctx context.Context, path string, options *types.Any) error {
+	r := &shim.CheckpointTaskRequest{
+		Path:    path,
+		Options: options,
+	}
+	if _, err := t.shim.Checkpoint(ctx, r); err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	return nil
+}
+
+// DeleteProcess removes the provided process from the task and deletes all on disk state
+func (t *Task) DeleteProcess(ctx context.Context, id string) (*runtime.Exit, error) {
+	r, err := t.shim.DeleteProcess(ctx, &shim.DeleteProcessRequest{
+		ID: id,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	return &runtime.Exit{
+		Status:    r.ExitStatus,
+		Timestamp: r.ExitedAt,
+		Pid:       r.Pid,
+	}, nil
+}
+
+// Update changes runtime information of a running task
+func (t *Task) Update(ctx context.Context, resources *types.Any) error {
+	if _, err := t.shim.Update(ctx, &shim.UpdateTaskRequest{
+		Resources: resources,
+	}); err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	return nil
+}
+
+// Process returns a specific process inside the task by the process id
+func (t *Task) Process(ctx context.Context, id string) (runtime.Process, error) {
+	// TODO: verify process exists for container
+	return &Process{
+		id: id,
+		t:  t,
+	}, nil
+}
+
+// Metrics returns runtime specific system level metric information for the task
+func (t *Task) Metrics(ctx context.Context) (interface{}, error) {
+	if t.cg == nil {
+		return nil, errors.Wrap(errdefs.ErrNotFound, "cgroup does not exist")
+	}
+	stats, err := t.cg.Stat(cgroups.IgnoreNotExist)
+	if err != nil {
+		return nil, err
+	}
+	return stats, nil
+}
+
+// Cgroup returns the underlying cgroup for a linux task
+func (t *Task) Cgroup() (cgroups.Cgroup, error) {
+	if t.cg == nil {
+		return nil, errors.Wrap(errdefs.ErrNotFound, "cgroup does not exist")
+	}
+	return t.cg, nil
+}
+
+// Wait for the task to exit returning the status and timestamp
+func (t *Task) Wait(ctx context.Context) (*runtime.Exit, error) {
+	r, err := t.shim.Wait(ctx, &shim.WaitRequest{
+		ID: t.id,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &runtime.Exit{
+		Timestamp: r.ExitedAt,
+		Status:    r.ExitStatus,
+	}, nil
+}
diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go
new file mode 100644
index 0000000..1081719
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/log/context.go
@@ -0,0 +1,81 @@
+package log
+
+import (
+	"context"
+	"path"
+
+	"github.com/sirupsen/logrus"
+)
+
+var (
+	// G is an alias for GetLogger.
+	//
+	// We may want to define this locally to a package to get package tagged log
+	// messages.
+	G = GetLogger
+
+	// L is an alias for the the standard logger.
+	L = logrus.NewEntry(logrus.StandardLogger())
+)
+
+type (
+	loggerKey struct{}
+	moduleKey struct{}
+)
+
+// WithLogger returns a new context with the provided logger. Use in
+// combination with logger.WithField(s) for great effect.
+func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context {
+	return context.WithValue(ctx, loggerKey{}, logger)
+}
+
+// GetLogger retrieves the current logger from the context. If no logger is
+// available, the default logger is returned.
+func GetLogger(ctx context.Context) *logrus.Entry {
+	logger := ctx.Value(loggerKey{})
+
+	if logger == nil {
+		return L
+	}
+
+	return logger.(*logrus.Entry)
+}
+
+// WithModule adds the module to the context, appending it with a slash if a
+// module already exists. A module is just an roughly correlated defined by the
+// call tree for a given context.
+//
+// As an example, we might have a "node" module already part of a context. If
+// this function is called with "tls", the new value of module will be
+// "node/tls".
+//
+// Modules represent the call path. If the new module and last module are the
+// same, a new module entry will not be created. If the new module and old
+// older module are the same but separated by other modules, the cycle will be
+// represented by the module path.
+func WithModule(ctx context.Context, module string) context.Context {
+	parent := GetModulePath(ctx)
+
+	if parent != "" {
+		// don't re-append module when module is the same.
+		if path.Base(parent) == module {
+			return ctx
+		}
+
+		module = path.Join(parent, module)
+	}
+
+	ctx = WithLogger(ctx, GetLogger(ctx).WithField("module", module))
+	return context.WithValue(ctx, moduleKey{}, module)
+}
+
+// GetModulePath returns the module path for the provided context. If no module
+// is set, an empty string is returned.
+func GetModulePath(ctx context.Context) string {
+	module := ctx.Value(moduleKey{})
+	if module == nil {
+		return ""
+	}
+
+	return module.(string)
+}
diff --git a/vendor/github.com/containerd/containerd/log/grpc.go b/vendor/github.com/containerd/containerd/log/grpc.go
new file mode 100644
index 0000000..cb2c921
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/log/grpc.go
@@ -0,0 +1,12 @@
+package log
+
+import (
+	"io/ioutil"
+	"log"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+func init() {
+	grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags))
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/adaptors.go b/vendor/github.com/containerd/containerd/metadata/adaptors.go
new file mode 100644
index 0000000..4e7ef67
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/adaptors.go
@@ -0,0 +1,113 @@
+package metadata
+
+import (
+	"strings"
+
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/filters"
+	"github.com/containerd/containerd/images"
+)
+
+func adaptImage(o interface{}) filters.Adaptor {
+	obj := o.(images.Image)
+	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+		if len(fieldpath) == 0 {
+			return "", false
+		}
+
+		switch fieldpath[0] {
+		case "name":
+			return obj.Name, len(obj.Name) > 0
+		case "target":
+			if len(fieldpath) < 2 {
+				return "", false
+			}
+
+			switch fieldpath[1] {
+			case "digest":
+				return obj.Target.Digest.String(), len(obj.Target.Digest) > 0
+			case "mediatype":
+				return obj.Target.MediaType, len(obj.Target.MediaType) > 0
+			}
+		case "labels":
+			return checkMap(fieldpath[1:], obj.Labels)
+			// TODO(stevvooe): Greater/Less than filters would be awesome for
+			// size. Let's do it!
+		}
+
+		return "", false
+	})
+}
+func adaptContainer(o interface{}) filters.Adaptor {
+	obj := o.(containers.Container)
+	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+		if len(fieldpath) == 0 {
+			return "", false
+		}
+
+		switch fieldpath[0] {
+		case "id":
+			return obj.ID, len(obj.ID) > 0
+		case "runtime":
+			if len(fieldpath) <= 1 {
+				return "", false
+			}
+
+			switch fieldpath[1] {
+			case "name":
+				return obj.Runtime.Name, len(obj.Runtime.Name) > 0
+			default:
+				return "", false
+			}
+		case "image":
+			return obj.Image, len(obj.Image) > 0
+		case "labels":
+			return checkMap(fieldpath[1:], obj.Labels)
+		}
+
+		return "", false
+	})
+}
+
+func adaptContentInfo(info content.Info) filters.Adaptor {
+	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+		if len(fieldpath) == 0 {
+			return "", false
+		}
+
+		switch fieldpath[0] {
+		case "digest":
+			return info.Digest.String(), true
+		case "size":
+			// TODO: support size based filtering
+		case "labels":
+			return checkMap(fieldpath[1:], info.Labels)
+		}
+
+		return "", false
+	})
+}
+
+func adaptContentStatus(status content.Status) filters.Adaptor {
+	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
+		if len(fieldpath) == 0 {
+			return "", false
+		}
+		switch fieldpath[0] {
+		case "ref":
+			return status.Ref, true
+		}
+
+		return "", false
+	})
+}
+
+func checkMap(fieldpath []string, m map[string]string) (string, bool) {
+	if len(m) == 0 {
+		return "", false
+	}
+
+	value, ok := m[strings.Join(fieldpath, ".")]
+	return value, ok
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/bolt.go b/vendor/github.com/containerd/containerd/metadata/bolt.go
new file mode 100644
index 0000000..2e4c352
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/bolt.go
@@ -0,0 +1,45 @@
+package metadata
+
+import (
+	"context"
+
+	"github.com/boltdb/bolt"
+	"github.com/pkg/errors"
+)
+
+type transactionKey struct{}
+
+// WithTransactionContext returns a new context holding the provided
+// bolt transaction. Functions which require a bolt transaction will
+// first check to see if a transaction is already created on the
+// context before creating their own.
+func WithTransactionContext(ctx context.Context, tx *bolt.Tx) context.Context {
+	return context.WithValue(ctx, transactionKey{}, tx)
+}
+
+type transactor interface {
+	View(fn func(*bolt.Tx) error) error
+	Update(fn func(*bolt.Tx) error) error
+}
+
+// view gets a bolt db transaction either from the context
+// or starts a new one with the provided bolt database.
+func view(ctx context.Context, db transactor, fn func(*bolt.Tx) error) error {
+	tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
+	if !ok {
+		return db.View(fn)
+	}
+	return fn(tx)
+}
+
+// update gets a writable bolt db transaction either from the context
+// or starts a new one with the provided bolt database.
+func update(ctx context.Context, db transactor, fn func(*bolt.Tx) error) error {
+	tx, ok := ctx.Value(transactionKey{}).(*bolt.Tx)
+	if !ok {
+		return db.Update(fn)
+	} else if !tx.Writable() {
+		return errors.Wrap(bolt.ErrTxNotWritable, "unable to use transaction from context")
+	}
+	return fn(tx)
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go b/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go
new file mode 100644
index 0000000..b713132
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/boltutil/helpers.go
@@ -0,0 +1,111 @@
+package boltutil
+
+import (
+	"time"
+
+	"github.com/boltdb/bolt"
+	"github.com/pkg/errors"
+)
+
+var (
+	bucketKeyLabels    = []byte("labels")
+	bucketKeyCreatedAt = []byte("createdat")
+	bucketKeyUpdatedAt = []byte("updatedat")
+)
+
+// ReadLabels reads the labels key from the bucket
+// Uses the key "labels"
+func ReadLabels(bkt *bolt.Bucket) (map[string]string, error) {
+	lbkt := bkt.Bucket(bucketKeyLabels)
+	if lbkt == nil {
+		return nil, nil
+	}
+	labels := map[string]string{}
+	if err := lbkt.ForEach(func(k, v []byte) error {
+		labels[string(k)] = string(v)
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+	return labels, nil
+}
+
+// WriteLabels will write a new labels bucket to the provided bucket at key
+// bucketKeyLabels, replacing the contents of the bucket with the provided map.
+//
+// The provide map labels will be modified to have the final contents of the
+// bucket. Typically, this removes zero-value entries.
+// Uses the key "labels"
+func WriteLabels(bkt *bolt.Bucket, labels map[string]string) error {
+	// Remove existing labels to keep from merging
+	if lbkt := bkt.Bucket(bucketKeyLabels); lbkt != nil {
+		if err := bkt.DeleteBucket(bucketKeyLabels); err != nil {
+			return err
+		}
+	}
+
+	if len(labels) == 0 {
+		return nil
+	}
+
+	lbkt, err := bkt.CreateBucket(bucketKeyLabels)
+	if err != nil {
+		return err
+	}
+
+	for k, v := range labels {
+		if v == "" {
+			delete(labels, k) // remove since we don't actually set it
+			continue
+		}
+
+		if err := lbkt.Put([]byte(k), []byte(v)); err != nil {
+			return errors.Wrapf(err, "failed to set label %q=%q", k, v)
+		}
+	}
+
+	return nil
+}
+
+// ReadTimestamps reads created and updated timestamps from a bucket.
+// Uses keys "createdat" and "updatedat"
+func ReadTimestamps(bkt *bolt.Bucket, created, updated *time.Time) error {
+	for _, f := range []struct {
+		b []byte
+		t *time.Time
+	}{
+		{bucketKeyCreatedAt, created},
+		{bucketKeyUpdatedAt, updated},
+	} {
+		v := bkt.Get(f.b)
+		if v != nil {
+			if err := f.t.UnmarshalBinary(v); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// WriteTimestamps writes created and updated timestamps to a bucket.
+// Uses keys "createdat" and "updatedat"
+func WriteTimestamps(bkt *bolt.Bucket, created, updated time.Time) error {
+	createdAt, err := created.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	updatedAt, err := updated.MarshalBinary()
+	if err != nil {
+		return err
+	}
+	for _, v := range [][2][]byte{
+		{bucketKeyCreatedAt, createdAt},
+		{bucketKeyUpdatedAt, updatedAt},
+	} {
+		if err := bkt.Put(v[0], v[1]); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/buckets.go b/vendor/github.com/containerd/containerd/metadata/buckets.go
new file mode 100644
index 0000000..43849e0
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/buckets.go
@@ -0,0 +1,175 @@
+package metadata
+
+import (
+	"github.com/boltdb/bolt"
+	digest "github.com/opencontainers/go-digest"
+)
+
+// The layout where a "/" delineates a bucket is desribed in the following
+// section. Please try to follow this as closely as possible when adding
+// functionality. We can bolster this with helpers and more structure if that
+// becomes an issue.
+//
+// Generically, we try to do the following:
+//
+// 	<version>/<namespace>/<object>/<key> -> <field>
+//
+// version: Currently, this is "v1". Additions can be made to v1 in a backwards
+// compatible way. If the layout changes, a new version must be made, along
+// with a migration.
+//
+// namespace: the namespace to which this object belongs.
+//
+// object: defines which object set is stored in the bucket. There are two
+// special objects, "labels" and "indexes". The "labels" bucket stores the
+// labels for the parent namespace. The "indexes" object is reserved for
+// indexing objects, if we require in the future.
+//
+// key: object-specific key identifying the storage bucket for the objects
+// contents.
+var (
+	bucketKeyVersion          = []byte(schemaVersion)
+	bucketKeyDBVersion        = []byte("version")    // stores the version of the schema
+	bucketKeyObjectLabels     = []byte("labels")     // stores the labels for a namespace.
+	bucketKeyObjectIndexes    = []byte("indexes")    // reserved
+	bucketKeyObjectImages     = []byte("images")     // stores image objects
+	bucketKeyObjectContainers = []byte("containers") // stores container objects
+	bucketKeyObjectSnapshots  = []byte("snapshots")  // stores snapshot references
+	bucketKeyObjectContent    = []byte("content")    // stores content references
+	bucketKeyObjectBlob       = []byte("blob")       // stores content links
+	bucketKeyObjectIngest     = []byte("ingest")     // stores ingest links
+
+	bucketKeyDigest      = []byte("digest")
+	bucketKeyMediaType   = []byte("mediatype")
+	bucketKeySize        = []byte("size")
+	bucketKeyImage       = []byte("image")
+	bucketKeyRuntime     = []byte("runtime")
+	bucketKeyName        = []byte("name")
+	bucketKeyParent      = []byte("parent")
+	bucketKeyChildren    = []byte("children")
+	bucketKeyOptions     = []byte("options")
+	bucketKeySpec        = []byte("spec")
+	bucketKeySnapshotKey = []byte("snapshotKey")
+	bucketKeySnapshotter = []byte("snapshotter")
+	bucketKeyTarget      = []byte("target")
+	bucketKeyExtensions  = []byte("extensions")
+)
+
+func getBucket(tx *bolt.Tx, keys ...[]byte) *bolt.Bucket {
+	bkt := tx.Bucket(keys[0])
+
+	for _, key := range keys[1:] {
+		if bkt == nil {
+			break
+		}
+		bkt = bkt.Bucket(key)
+	}
+
+	return bkt
+}
+
+func createBucketIfNotExists(tx *bolt.Tx, keys ...[]byte) (*bolt.Bucket, error) {
+	bkt, err := tx.CreateBucketIfNotExists(keys[0])
+	if err != nil {
+		return nil, err
+	}
+
+	for _, key := range keys[1:] {
+		bkt, err = bkt.CreateBucketIfNotExists(key)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return bkt, nil
+}
+
+func namespaceLabelsBucketPath(namespace string) [][]byte {
+	return [][]byte{bucketKeyVersion, []byte(namespace), bucketKeyObjectLabels}
+}
+
+func withNamespacesLabelsBucket(tx *bolt.Tx, namespace string, fn func(bkt *bolt.Bucket) error) error {
+	bkt, err := createBucketIfNotExists(tx, namespaceLabelsBucketPath(namespace)...)
+	if err != nil {
+		return err
+	}
+
+	return fn(bkt)
+}
+
+func getNamespaceLabelsBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {
+	return getBucket(tx, namespaceLabelsBucketPath(namespace)...)
+}
+
+func imagesBucketPath(namespace string) [][]byte {
+	return [][]byte{bucketKeyVersion, []byte(namespace), bucketKeyObjectImages}
+}
+
+func withImagesBucket(tx *bolt.Tx, namespace string, fn func(bkt *bolt.Bucket) error) error {
+	bkt, err := createBucketIfNotExists(tx, imagesBucketPath(namespace)...)
+	if err != nil {
+		return err
+	}
+
+	return fn(bkt)
+}
+
+func getImagesBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {
+	return getBucket(tx, imagesBucketPath(namespace)...)
+}
+
+func createContainersBucket(tx *bolt.Tx, namespace string) (*bolt.Bucket, error) {
+	bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers)
+	if err != nil {
+		return nil, err
+	}
+	return bkt, nil
+}
+
+func getContainersBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {
+	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers)
+}
+
+func getContainerBucket(tx *bolt.Tx, namespace, id string) *bolt.Bucket {
+	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContainers, []byte(id))
+}
+
+func createSnapshotterBucket(tx *bolt.Tx, namespace, snapshotter string) (*bolt.Bucket, error) {
+	bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectSnapshots, []byte(snapshotter))
+	if err != nil {
+		return nil, err
+	}
+	return bkt, nil
+}
+
+func getSnapshotterBucket(tx *bolt.Tx, namespace, snapshotter string) *bolt.Bucket {
+	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectSnapshots, []byte(snapshotter))
+}
+
+func createBlobBucket(tx *bolt.Tx, namespace string, dgst digest.Digest) (*bolt.Bucket, error) {
+	bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(dgst.String()))
+	if err != nil {
+		return nil, err
+	}
+	return bkt, nil
+}
+
+func getBlobsBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {
+	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob)
+}
+
+func getBlobBucket(tx *bolt.Tx, namespace string, dgst digest.Digest) *bolt.Bucket {
+	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(dgst.String()))
+}
+
+func createIngestBucket(tx *bolt.Tx, namespace string) (*bolt.Bucket, error) {
+	bkt, err := createBucketIfNotExists(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectIngest)
+	if err != nil {
+		return nil, err
+	}
+	return bkt, nil
+}
+
+func getIngestBucket(tx *bolt.Tx, namespace string) *bolt.Bucket {
+	return getBucket(tx, bucketKeyVersion, []byte(namespace), bucketKeyObjectContent, bucketKeyObjectIngest)
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/containers.go b/vendor/github.com/containerd/containerd/metadata/containers.go
new file mode 100644
index 0000000..c9d8ab6
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/containers.go
@@ -0,0 +1,414 @@
+package metadata
+
+import (
+	"context"
+	"strings"
+	"time"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/filters"
+	"github.com/containerd/containerd/identifiers"
+	"github.com/containerd/containerd/labels"
+	"github.com/containerd/containerd/metadata/boltutil"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/types"
+	"github.com/pkg/errors"
+)
+
+type containerStore struct {
+	tx *bolt.Tx
+}
+
+// NewContainerStore returns a Store backed by an underlying bolt DB
+func NewContainerStore(tx *bolt.Tx) containers.Store {
+	return &containerStore{
+		tx: tx,
+	}
+}
+
+func (s *containerStore) Get(ctx context.Context, id string) (containers.Container, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return containers.Container{}, err
+	}
+
+	bkt := getContainerBucket(s.tx, namespace, id)
+	if bkt == nil {
+		return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "bucket name %q:%q", namespace, id)
+	}
+
+	container := containers.Container{ID: id}
+	if err := readContainer(&container, bkt); err != nil {
+		return containers.Container{}, errors.Wrapf(err, "failed to read container %v", id)
+	}
+
+	return container, nil
+}
+
+func (s *containerStore) List(ctx context.Context, fs ...string) ([]containers.Container, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	filter, err := filters.ParseAll(fs...)
+	if err != nil {
+		return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error())
+	}
+
+	bkt := getContainersBucket(s.tx, namespace)
+	if bkt == nil {
+		return nil, nil
+	}
+
+	var m []containers.Container
+	if err := bkt.ForEach(func(k, v []byte) error {
+		cbkt := bkt.Bucket(k)
+		if cbkt == nil {
+			return nil
+		}
+		container := containers.Container{ID: string(k)}
+
+		if err := readContainer(&container, cbkt); err != nil {
+			return errors.Wrap(err, "failed to read container")
+		}
+
+		if filter.Match(adaptContainer(container)) {
+			m = append(m, container)
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
+
+func (s *containerStore) Create(ctx context.Context, container containers.Container) (containers.Container, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return containers.Container{}, err
+	}
+
+	if err := validateContainer(&container); err != nil {
+		return containers.Container{}, errors.Wrap(err, "create container failed validation")
+	}
+
+	bkt, err := createContainersBucket(s.tx, namespace)
+	if err != nil {
+		return containers.Container{}, err
+	}
+
+	cbkt, err := bkt.CreateBucket([]byte(container.ID))
+	if err != nil {
+		if err == bolt.ErrBucketExists {
+			err = errors.Wrapf(errdefs.ErrAlreadyExists, "container %q", container.ID)
+		}
+		return containers.Container{}, err
+	}
+
+	container.CreatedAt = time.Now().UTC()
+	container.UpdatedAt = container.CreatedAt
+	if err := writeContainer(cbkt, &container); err != nil {
+		return containers.Container{}, errors.Wrap(err, "failed to write container")
+	}
+
+	return container, nil
+}
+
+func (s *containerStore) Update(ctx context.Context, container containers.Container, fieldpaths ...string) (containers.Container, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return containers.Container{}, err
+	}
+
+	if container.ID == "" {
+		return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "must specify a container id")
+	}
+
+	bkt := getContainersBucket(s.tx, namespace)
+	if bkt == nil {
+		return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID)
+	}
+
+	cbkt := bkt.Bucket([]byte(container.ID))
+	if cbkt == nil {
+		return containers.Container{}, errors.Wrapf(errdefs.ErrNotFound, "container %q", container.ID)
+	}
+
+	var updated containers.Container
+	if err := readContainer(&updated, cbkt); err != nil {
+		return updated, errors.Wrapf(err, "failed to read container from bucket")
+	}
+	createdat := updated.CreatedAt
+	updated.ID = container.ID
+
+	if len(fieldpaths) == 0 {
+		// only allow updates to these field on full replace.
+		fieldpaths = []string{"labels", "spec", "extensions"}
+
+		// Fields that are immutable must cause an error when no field paths
+		// are provided. This allows these fields to become mutable in the
+		// future.
+		if updated.Image != container.Image {
+			return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Image field is immutable")
+		}
+
+		if updated.SnapshotKey != container.SnapshotKey {
+			return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.SnapshotKey field is immutable")
+		}
+
+		if updated.Snapshotter != container.Snapshotter {
+			return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter field is immutable")
+		}
+
+		if updated.Runtime.Name != container.Runtime.Name {
+			return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name field is immutable")
+		}
+	}
+
+	// apply the field mask. If you update this code, you better follow the
+	// field mask rules in field_mask.proto. If you don't know what this
+	// is, do not update this code.
+	for _, path := range fieldpaths {
+		if strings.HasPrefix(path, "labels.") {
+			if updated.Labels == nil {
+				updated.Labels = map[string]string{}
+			}
+			key := strings.TrimPrefix(path, "labels.")
+			updated.Labels[key] = container.Labels[key]
+			continue
+		}
+
+		if strings.HasPrefix(path, "extensions.") {
+			if updated.Extensions == nil {
+				updated.Extensions = map[string]types.Any{}
+			}
+			key := strings.TrimPrefix(path, "extensions.")
+			updated.Extensions[key] = container.Extensions[key]
+			continue
+		}
+
+		switch path {
+		case "labels":
+			updated.Labels = container.Labels
+		case "spec":
+			updated.Spec = container.Spec
+		case "extensions":
+			updated.Extensions = container.Extensions
+		default:
+			return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on %q", path, container.ID)
+		}
+	}
+
+	if err := validateContainer(&updated); err != nil {
+		return containers.Container{}, errors.Wrap(err, "update failed validation")
+	}
+
+	updated.CreatedAt = createdat
+	updated.UpdatedAt = time.Now().UTC()
+	if err := writeContainer(cbkt, &updated); err != nil {
+		return containers.Container{}, errors.Wrap(err, "failed to write container")
+	}
+
+	return updated, nil
+}
+
+func (s *containerStore) Delete(ctx context.Context, id string) error {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	bkt := getContainersBucket(s.tx, namespace)
+	if bkt == nil {
+		return errors.Wrapf(errdefs.ErrNotFound, "cannot delete container %v, bucket not present", id)
+	}
+	err = bkt.DeleteBucket([]byte(id)) // assign, don't shadow: non-NotFound bolt errors must propagate
+	if err == bolt.ErrBucketNotFound {
+		return errors.Wrapf(errdefs.ErrNotFound, "container %v", id)
+	}
+	return err
+}
+
+func validateContainer(container *containers.Container) error {
+	if err := identifiers.Validate(container.ID); err != nil {
+		return errors.Wrapf(err, "container.ID validation error")
+	}
+
+	for k := range container.Extensions {
+		if k == "" {
+			return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Extension keys must not be zero-length")
+		}
+	}
+
+	// image has no validation
+	for k, v := range container.Labels {
+		if err := labels.Validate(k, v); err != nil { // reject invalid labels (condition was inverted: err == nil returned a nil wrap)
+			return errors.Wrapf(err, "containers.Labels")
+		}
+	}
+
+	if container.Runtime.Name == "" {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name must be set")
+	}
+
+	if container.Spec == nil {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Spec must be set")
+	}
+
+	if container.SnapshotKey != "" && container.Snapshotter == "" {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set if container.SnapshotKey is set")
+	}
+
+	return nil
+}
+
+func readContainer(container *containers.Container, bkt *bolt.Bucket) error {
+	labels, err := boltutil.ReadLabels(bkt)
+	if err != nil {
+		return err
+	}
+	container.Labels = labels
+
+	if err := boltutil.ReadTimestamps(bkt, &container.CreatedAt, &container.UpdatedAt); err != nil {
+		return err
+	}
+
+	return bkt.ForEach(func(k, v []byte) error {
+		switch string(k) {
+		case string(bucketKeyImage):
+			container.Image = string(v)
+		case string(bucketKeyRuntime):
+			rbkt := bkt.Bucket(bucketKeyRuntime)
+			if rbkt == nil {
+				return nil // skip runtime. should be an error?
+			}
+
+			n := rbkt.Get(bucketKeyName)
+			if n != nil {
+				container.Runtime.Name = string(n)
+			}
+
+			obkt := rbkt.Get(bucketKeyOptions)
+			if obkt == nil {
+				return nil
+			}
+
+			var any types.Any
+			if err := proto.Unmarshal(obkt, &any); err != nil {
+				return err
+			}
+			container.Runtime.Options = &any
+		case string(bucketKeySpec):
+			var any types.Any
+			if err := proto.Unmarshal(v, &any); err != nil {
+				return err
+			}
+			container.Spec = &any
+		case string(bucketKeySnapshotKey):
+			container.SnapshotKey = string(v)
+		case string(bucketKeySnapshotter):
+			container.Snapshotter = string(v)
+		case string(bucketKeyExtensions):
+			ebkt := bkt.Bucket(bucketKeyExtensions)
+			if ebkt == nil {
+				return nil
+			}
+
+			extensions := make(map[string]types.Any)
+			if err := ebkt.ForEach(func(k, v []byte) error {
+				var a types.Any
+				if err := proto.Unmarshal(v, &a); err != nil {
+					return err
+				}
+
+				extensions[string(k)] = a
+				return nil
+			}); err != nil {
+
+				return err
+			}
+
+			container.Extensions = extensions
+		}
+
+		return nil
+	})
+}
+
+func writeContainer(bkt *bolt.Bucket, container *containers.Container) error {
+	if err := boltutil.WriteTimestamps(bkt, container.CreatedAt, container.UpdatedAt); err != nil {
+		return err
+	}
+
+	if container.Spec != nil {
+		spec, err := container.Spec.Marshal()
+		if err != nil {
+			return err
+		}
+
+		if err := bkt.Put(bucketKeySpec, spec); err != nil {
+			return err
+		}
+	}
+
+	for _, v := range [][2][]byte{
+		{bucketKeyImage, []byte(container.Image)},
+		{bucketKeySnapshotter, []byte(container.Snapshotter)},
+		{bucketKeySnapshotKey, []byte(container.SnapshotKey)},
+	} {
+		if err := bkt.Put(v[0], v[1]); err != nil {
+			return err
+		}
+	}
+
+	if rbkt := bkt.Bucket(bucketKeyRuntime); rbkt != nil {
+		if err := bkt.DeleteBucket(bucketKeyRuntime); err != nil {
+			return err
+		}
+	}
+
+	rbkt, err := bkt.CreateBucket(bucketKeyRuntime)
+	if err != nil {
+		return err
+	}
+
+	if err := rbkt.Put(bucketKeyName, []byte(container.Runtime.Name)); err != nil {
+		return err
+	}
+
+	if len(container.Extensions) > 0 {
+		ebkt, err := bkt.CreateBucketIfNotExists(bucketKeyExtensions)
+		if err != nil {
+			return err
+		}
+
+		for name, ext := range container.Extensions {
+			p, err := proto.Marshal(&ext)
+			if err != nil {
+				return err
+			}
+
+			if err := ebkt.Put([]byte(name), p); err != nil {
+				return err
+			}
+		}
+	}
+
+	if container.Runtime.Options != nil {
+		data, err := proto.Marshal(container.Runtime.Options)
+		if err != nil {
+			return err
+		}
+
+		if err := rbkt.Put(bucketKeyOptions, data); err != nil {
+			return err
+		}
+	}
+
+	return boltutil.WriteLabels(bkt, container.Labels)
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/content.go b/vendor/github.com/containerd/containerd/metadata/content.go
new file mode 100644
index 0000000..05064fd
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/content.go
@@ -0,0 +1,582 @@
+package metadata
+
+import (
+	"context"
+	"encoding/binary"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/filters"
+	"github.com/containerd/containerd/labels"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/metadata/boltutil"
+	"github.com/containerd/containerd/namespaces"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+type contentStore struct {
+	content.Store
+	db *DB
+	l  sync.RWMutex
+}
+
+// newContentStore returns a namespaced content store using an existing
+// content store interface.
+func newContentStore(db *DB, cs content.Store) *contentStore {
+	return &contentStore{
+		Store: cs,
+		db:    db,
+	}
+}
+
+func (cs *contentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return content.Info{}, err
+	}
+
+	var info content.Info
+	if err := view(ctx, cs.db, func(tx *bolt.Tx) error {
+		bkt := getBlobBucket(tx, ns, dgst)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst)
+		}
+
+		info.Digest = dgst
+		return readInfo(&info, bkt)
+	}); err != nil {
+		return content.Info{}, err
+	}
+
+	return info, nil
+}
+
+func (cs *contentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return content.Info{}, err
+	}
+
+	cs.l.RLock()
+	defer cs.l.RUnlock()
+
+	updated := content.Info{
+		Digest: info.Digest,
+	}
+	if err := update(ctx, cs.db, func(tx *bolt.Tx) error {
+		bkt := getBlobBucket(tx, ns, info.Digest)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", info.Digest)
+		}
+
+		if err := readInfo(&updated, bkt); err != nil {
+			return errors.Wrapf(err, "info %q", info.Digest)
+		}
+
+		if len(fieldpaths) > 0 {
+			for _, path := range fieldpaths {
+				if strings.HasPrefix(path, "labels.") {
+					if updated.Labels == nil {
+						updated.Labels = map[string]string{}
+					}
+
+					key := strings.TrimPrefix(path, "labels.")
+					updated.Labels[key] = info.Labels[key]
+					continue
+				}
+
+				switch path {
+				case "labels":
+					updated.Labels = info.Labels
+				default:
+					return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest)
+				}
+			}
+		} else {
+			// Set mutable fields
+			updated.Labels = info.Labels
+		}
+		if err := validateInfo(&updated); err != nil {
+			return err
+		}
+
+		updated.UpdatedAt = time.Now().UTC()
+		return writeInfo(&updated, bkt)
+	}); err != nil {
+		return content.Info{}, err
+	}
+	return updated, nil
+}
+
+func (cs *contentStore) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	filter, err := filters.ParseAll(fs...)
+	if err != nil {
+		return err
+	}
+
+	// TODO: Batch results to keep from reading all info into memory
+	var infos []content.Info
+	if err := view(ctx, cs.db, func(tx *bolt.Tx) error {
+		bkt := getBlobsBucket(tx, ns)
+		if bkt == nil {
+			return nil
+		}
+
+		return bkt.ForEach(func(k, v []byte) error {
+			dgst, err := digest.Parse(string(k))
+			if err != nil {
+				// Not a digest, skip
+				return nil
+			}
+			bbkt := bkt.Bucket(k)
+			if bbkt == nil {
+				return nil
+			}
+			info := content.Info{
+				Digest: dgst,
+			}
+			if err := readInfo(&info, bbkt); err != nil { // reuse bbkt rather than a second Bucket(k) lookup
+				return err
+			}
+			if filter.Match(adaptContentInfo(info)) {
+				infos = append(infos, info)
+			}
+			return nil
+		})
+	}); err != nil {
+		return err
+	}
+
+	for _, info := range infos {
+		if err := fn(info); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (cs *contentStore) Delete(ctx context.Context, dgst digest.Digest) error {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	cs.l.RLock()
+	defer cs.l.RUnlock()
+
+	return update(ctx, cs.db, func(tx *bolt.Tx) error {
+		bkt := getBlobBucket(tx, ns, dgst)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst)
+		}
+
+		if err := getBlobsBucket(tx, ns).DeleteBucket([]byte(dgst.String())); err != nil {
+			return err
+		}
+
+		// Mark content store as dirty for triggering garbage collection
+		cs.db.dirtyL.Lock()
+		cs.db.dirtyCS = true
+		cs.db.dirtyL.Unlock()
+
+		return nil
+	})
+}
+
+func (cs *contentStore) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	filter, err := filters.ParseAll(fs...)
+	if err != nil {
+		return nil, err
+	}
+
+	brefs := map[string]string{}
+	if err := view(ctx, cs.db, func(tx *bolt.Tx) error {
+		bkt := getIngestBucket(tx, ns)
+		if bkt == nil {
+			return nil
+		}
+
+		return bkt.ForEach(func(k, v []byte) error {
+			// TODO(dmcgowan): match name and potentially labels here
+			brefs[string(k)] = string(v)
+			return nil
+		})
+	}); err != nil {
+		return nil, err
+	}
+
+	statuses := make([]content.Status, 0, len(brefs))
+	for k, bref := range brefs {
+		status, err := cs.Store.Status(ctx, bref)
+		if err != nil {
+			if errdefs.IsNotFound(err) {
+				continue
+			}
+			return nil, err
+		}
+		status.Ref = k
+
+		if filter.Match(adaptContentStatus(status)) {
+			statuses = append(statuses, status)
+		}
+	}
+
+	return statuses, nil
+
+}
+
+func getRef(tx *bolt.Tx, ns, ref string) string {
+	bkt := getIngestBucket(tx, ns)
+	if bkt == nil {
+		return ""
+	}
+	v := bkt.Get([]byte(ref))
+	if len(v) == 0 {
+		return ""
+	}
+	return string(v)
+}
+
+func (cs *contentStore) Status(ctx context.Context, ref string) (content.Status, error) {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return content.Status{}, err
+	}
+
+	var bref string
+	if err := view(ctx, cs.db, func(tx *bolt.Tx) error {
+		bref = getRef(tx, ns, ref)
+		if bref == "" {
+			return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref)
+		}
+
+		return nil
+	}); err != nil {
+		return content.Status{}, err
+	}
+
+	st, err := cs.Store.Status(ctx, bref)
+	if err != nil {
+		return content.Status{}, err
+	}
+	st.Ref = ref
+	return st, nil
+}
+
+func (cs *contentStore) Abort(ctx context.Context, ref string) error {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	cs.l.RLock()
+	defer cs.l.RUnlock()
+
+	return update(ctx, cs.db, func(tx *bolt.Tx) error {
+		bkt := getIngestBucket(tx, ns)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref)
+		}
+		bref := string(bkt.Get([]byte(ref)))
+		if bref == "" {
+			return errors.Wrapf(errdefs.ErrNotFound, "reference %v", ref)
+		}
+		if err := bkt.Delete([]byte(ref)); err != nil {
+			return err
+		}
+
+		return cs.Store.Abort(ctx, bref)
+	})
+
+}
+
+func (cs *contentStore) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	cs.l.RLock()
+	defer cs.l.RUnlock()
+
+	var w content.Writer
+	if err := update(ctx, cs.db, func(tx *bolt.Tx) error {
+		if expected != "" {
+			cbkt := getBlobBucket(tx, ns, expected)
+			if cbkt != nil {
+				return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected)
+			}
+		}
+
+		bkt, err := createIngestBucket(tx, ns)
+		if err != nil {
+			return err
+		}
+
+		var (
+			bref  string
+			brefb = bkt.Get([]byte(ref))
+		)
+
+		if brefb == nil {
+			sid, err := bkt.NextSequence()
+			if err != nil {
+				return err
+			}
+
+			bref = createKey(sid, ns, ref)
+			if err := bkt.Put([]byte(ref), []byte(bref)); err != nil {
+				return err
+			}
+		} else {
+			bref = string(brefb)
+		}
+
+		// Do not use the passed in expected value here since it was
+		// already checked against the user metadata. If the content
+		// store has the content, it must still be written before
+		// linked into the given namespace. It is possible in the future
+		// to allow content which exists in content store but not
+		// namespace to be linked here and returned an exist error, but
+		// this would require more configuration to make secure.
+		w, err = cs.Store.Writer(ctx, bref, size, "")
+		return err
+	}); err != nil {
+		return nil, err
+	}
+
+	// TODO: keep the expected in the writer to use on commit
+	// when no expected is provided there.
+	return &namespacedWriter{
+		Writer:    w,
+		ref:       ref,
+		namespace: ns,
+		db:        cs.db,
+		l:         &cs.l,
+	}, nil
+}
+
+type namespacedWriter struct {
+	content.Writer
+	ref       string
+	namespace string
+	db        transactor
+	l         *sync.RWMutex
+}
+
+func (nw *namespacedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	nw.l.RLock()
+	defer nw.l.RUnlock()
+
+	return update(ctx, nw.db, func(tx *bolt.Tx) error {
+		bkt := getIngestBucket(tx, nw.namespace)
+		if bkt != nil {
+			if err := bkt.Delete([]byte(nw.ref)); err != nil {
+				return err
+			}
+		}
+		return nw.commit(ctx, tx, size, expected, opts...)
+	})
+}
+
+func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, expected digest.Digest, opts ...content.Opt) error {
+	var base content.Info
+	for _, opt := range opts {
+		if err := opt(&base); err != nil {
+			return err
+		}
+	}
+	if err := validateInfo(&base); err != nil {
+		return err
+	}
+
+	status, err := nw.Writer.Status()
+	if err != nil {
+		return err
+	}
+	if size != 0 && size != status.Offset {
+		return errors.Errorf("%q failed size validation: %v != %v", nw.ref, status.Offset, size)
+	}
+	size = status.Offset
+
+	actual := nw.Writer.Digest()
+
+	if err := nw.Writer.Commit(ctx, size, expected); err != nil {
+		if !errdefs.IsAlreadyExists(err) {
+			return err
+		}
+		if getBlobBucket(tx, nw.namespace, actual) != nil {
+			return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", actual)
+		}
+	}
+
+	bkt, err := createBlobBucket(tx, nw.namespace, actual)
+	if err != nil {
+		return err
+	}
+
+	commitTime := time.Now().UTC()
+
+	sizeEncoded, err := encodeInt(size)
+	if err != nil {
+		return err
+	}
+
+	if err := boltutil.WriteTimestamps(bkt, commitTime, commitTime); err != nil {
+		return err
+	}
+	if err := boltutil.WriteLabels(bkt, base.Labels); err != nil {
+		return err
+	}
+	return bkt.Put(bucketKeySize, sizeEncoded)
+}
+
+func (nw *namespacedWriter) Status() (content.Status, error) {
+	st, err := nw.Writer.Status()
+	if err == nil {
+		st.Ref = nw.ref
+	}
+	return st, err
+}
+
+func (cs *contentStore) ReaderAt(ctx context.Context, dgst digest.Digest) (content.ReaderAt, error) {
+	if err := cs.checkAccess(ctx, dgst); err != nil {
+		return nil, err
+	}
+	return cs.Store.ReaderAt(ctx, dgst)
+}
+
+func (cs *contentStore) checkAccess(ctx context.Context, dgst digest.Digest) error {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	return view(ctx, cs.db, func(tx *bolt.Tx) error {
+		bkt := getBlobBucket(tx, ns, dgst)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "content digest %v", dgst)
+		}
+		return nil
+	})
+}
+
+func validateInfo(info *content.Info) error {
+	for k, v := range info.Labels {
+		if err := labels.Validate(k, v); err != nil { // reject invalid labels (condition was inverted: err == nil returned a nil wrap)
+			return errors.Wrapf(err, "info.Labels")
+		}
+	}
+
+	return nil
+}
+
+func readInfo(info *content.Info, bkt *bolt.Bucket) error {
+	if err := boltutil.ReadTimestamps(bkt, &info.CreatedAt, &info.UpdatedAt); err != nil {
+		return err
+	}
+
+	labels, err := boltutil.ReadLabels(bkt)
+	if err != nil {
+		return err
+	}
+	info.Labels = labels
+
+	if v := bkt.Get(bucketKeySize); len(v) > 0 {
+		info.Size, _ = binary.Varint(v)
+	}
+
+	return nil
+}
+
+func writeInfo(info *content.Info, bkt *bolt.Bucket) error {
+	if err := boltutil.WriteTimestamps(bkt, info.CreatedAt, info.UpdatedAt); err != nil {
+		return err
+	}
+
+	if err := boltutil.WriteLabels(bkt, info.Labels); err != nil {
+		return errors.Wrapf(err, "writing labels for info %v", info.Digest)
+	}
+
+	// Write size
+	sizeEncoded, err := encodeInt(info.Size)
+	if err != nil {
+		return err
+	}
+
+	return bkt.Put(bucketKeySize, sizeEncoded)
+}
+
+func (cs *contentStore) garbageCollect(ctx context.Context) error {
+	lt1 := time.Now()
+	cs.l.Lock()
+	defer func() {
+		cs.l.Unlock()
+		log.G(ctx).WithField("t", time.Since(lt1)).Debugf("content garbage collected")
+	}()
+
+	seen := map[string]struct{}{}
+	if err := cs.db.View(func(tx *bolt.Tx) error {
+		v1bkt := tx.Bucket(bucketKeyVersion)
+		if v1bkt == nil {
+			return nil
+		}
+
+		// iterate through each namespace
+		v1c := v1bkt.Cursor()
+
+		for k, v := v1c.First(); k != nil; k, v = v1c.Next() {
+			if v != nil {
+				continue
+			}
+
+			cbkt := v1bkt.Bucket(k).Bucket(bucketKeyObjectContent)
+			if cbkt == nil || cbkt.Bucket(bucketKeyObjectBlob) == nil {
+				continue
+			}
+			bbkt := cbkt.Bucket(bucketKeyObjectBlob) // non-nil: guarded above (was an unchecked nil deref)
+			if err := bbkt.ForEach(func(ck, cv []byte) error {
+				if cv == nil {
+					seen[string(ck)] = struct{}{}
+				}
+				return nil
+			}); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	if err := cs.Store.Walk(ctx, func(info content.Info) error {
+		if _, ok := seen[info.Digest.String()]; !ok {
+			if err := cs.Store.Delete(ctx, info.Digest); err != nil {
+				return err
+			}
+			log.G(ctx).WithField("digest", info.Digest).Debug("removed content")
+		}
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/db.go b/vendor/github.com/containerd/containerd/metadata/db.go
new file mode 100644
index 0000000..510d14a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/db.go
@@ -0,0 +1,318 @@
+package metadata
+
+import (
+	"context"
+	"encoding/binary"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/gc"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/snapshot"
+	"github.com/pkg/errors"
+)
+
+const (
+	// schemaVersion represents the schema version of
+	// the database. This schema version represents the
+	// structure of the data in the database. The schema
+	// can evolve at any time but any backwards
+	// incompatible changes or structural changes require
+	// bumping the schema version.
+	schemaVersion = "v1"
+
+	// dbVersion represents updates to the schema
+	// version which are additions and compatible with
+	// prior version of the same schema.
+	dbVersion = 1
+)
+
+// DB represents a metadata database backed by a bolt
+// database. The database is fully namespaced and stores
+// image, container, namespace, snapshot, and content data
+// while proxying data shared across namespaces to backend
+// datastores for content and snapshots.
+type DB struct {
+	db *bolt.DB
+	ss map[string]*snapshotter
+	cs *contentStore
+
+	// wlock is used to protect access to the data structures during garbage
+	// collection. While the wlock is held no writable transactions can be
+	// opened, preventing changes from occurring between the mark and
+	// sweep phases without preventing read transactions.
+	wlock sync.RWMutex
+
+	// dirty flags and lock keeps track of datastores which have had deletions
+	// since the last garbage collection. These datastores will be garbage
+	// collected during the next garbage collection.
+	dirtyL  sync.Mutex
+	dirtySS map[string]struct{}
+	dirtyCS bool
+
+	// TODO: Keep track of stats such as pause time, number of collected objects, errors
+	lastCollection time.Time
+}
+
+// NewDB creates a new metadata database using the provided
+// bolt database, content store, and snapshotters.
+func NewDB(db *bolt.DB, cs content.Store, ss map[string]snapshot.Snapshotter) *DB {
+	m := &DB{
+		db:      db,
+		ss:      make(map[string]*snapshotter, len(ss)),
+		dirtySS: map[string]struct{}{},
+	}
+
+	// Initialize data stores
+	m.cs = newContentStore(m, cs)
+	for name, sn := range ss {
+		m.ss[name] = newSnapshotter(m, name, sn)
+	}
+
+	return m
+}
+
+// Init ensures the database is at the correct version
+// and performs any needed migrations.
+func (m *DB) Init(ctx context.Context) error {
+	// errSkip is used when no migration or version needs to be written
+	// to the database and the transaction can be immediately rolled
+	// back rather than performing a much slower and unnecessary commit.
+	var errSkip = errors.New("skip update")
+
+	err := m.db.Update(func(tx *bolt.Tx) error {
+		var (
+			// current schema and version
+			schema  = "v0"
+			version = 0
+		)
+
+		// i represents the index of the first migration
+		// which must be run to get the database up to date.
+		// The migration's version will be checked in reverse
+		// order, decrementing i for each migration which
+		// represents a version newer than the current
+		// database version
+		i := len(migrations)
+
+		for ; i > 0; i-- {
+			migration := migrations[i-1]
+
+			bkt := tx.Bucket([]byte(migration.schema))
+			if bkt == nil {
+				// Hasn't encountered another schema, go to next migration
+				if schema == "v0" {
+					continue
+				}
+				break
+			}
+			if schema == "v0" {
+				schema = migration.schema
+				vb := bkt.Get(bucketKeyDBVersion)
+				if vb != nil {
+					v, _ := binary.Varint(vb)
+					version = int(v)
+				}
+			}
+
+			if version >= migration.version {
+				break
+			}
+		}
+
+		// Previous version of database found
+		if schema != "v0" {
+			updates := migrations[i:]
+
+			// No migration updates, return immediately
+			if len(updates) == 0 {
+				return errSkip
+			}
+
+			for _, m := range updates {
+				t0 := time.Now()
+				if err := m.migrate(tx); err != nil {
+					return errors.Wrapf(err, "failed to migrate to %s.%d", m.schema, m.version)
+				}
+				log.G(ctx).WithField("d", time.Since(t0)).Debugf("database migration to %s.%d finished", m.schema, m.version)
+			}
+		}
+
+		bkt, err := tx.CreateBucketIfNotExists(bucketKeyVersion)
+		if err != nil {
+			return err
+		}
+
+		versionEncoded, err := encodeInt(dbVersion)
+		if err != nil {
+			return err
+		}
+
+		return bkt.Put(bucketKeyDBVersion, versionEncoded)
+	})
+	if err == errSkip {
+		err = nil
+	}
+	return err
+}
+
+// ContentStore returns a namespaced content store
+// proxied to a content store.
+func (m *DB) ContentStore() content.Store {
+	if m.cs == nil {
+		return nil
+	}
+	return m.cs
+}
+
+// Snapshotter returns a namespaced content store for
+// the requested snapshotter name proxied to a snapshotter.
+func (m *DB) Snapshotter(name string) snapshot.Snapshotter {
+	sn, ok := m.ss[name]
+	if !ok {
+		return nil
+	}
+	return sn
+}
+
+// View runs a readonly transaction on the metadata store.
+func (m *DB) View(fn func(*bolt.Tx) error) error {
+	return m.db.View(fn)
+}
+
+// Update runs a writable transation on the metadata store.
+func (m *DB) Update(fn func(*bolt.Tx) error) error {
+	m.wlock.RLock()
+	defer m.wlock.RUnlock()
+	return m.db.Update(fn)
+}
+
+// GarbageCollect marks reachable resources, sweeps unreferenced ones, and schedules backend cleanup.
+func (m *DB) GarbageCollect(ctx context.Context) error {
+	lt1 := time.Now()
+	m.wlock.Lock()
+	defer func() {
+		m.wlock.Unlock()
+		log.G(ctx).WithField("d", time.Since(lt1)).Debug("metadata garbage collected")
+	}()
+	var marked map[gc.Node]struct{}
+
+	if err := m.db.View(func(tx *bolt.Tx) error {
+		ctx, cancel := context.WithCancel(ctx)
+		defer cancel()
+
+		roots := make(chan gc.Node)
+		errChan := make(chan error)
+		go func() {
+			defer close(errChan)
+			defer close(roots)
+
+			// Call roots
+			if err := scanRoots(ctx, tx, roots); err != nil {
+				cancel()
+				errChan <- err
+			}
+		}()
+
+		refs := func(ctx context.Context, n gc.Node, fn func(gc.Node)) error {
+			return references(ctx, tx, n, fn)
+		}
+
+		reachable, err := gc.ConcurrentMark(ctx, roots, refs)
+		if rerr := <-errChan; rerr != nil {
+			return rerr
+		}
+		if err != nil {
+			return err
+		}
+		marked = reachable
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	m.dirtyL.Lock()
+	defer m.dirtyL.Unlock()
+
+	if err := m.db.Update(func(tx *bolt.Tx) error {
+		ctx, cancel := context.WithCancel(ctx)
+		defer cancel()
+
+		nodeC := make(chan gc.Node)
+		var scanErr error
+
+		go func() {
+			defer close(nodeC)
+			scanErr = scanAll(ctx, tx, nodeC)
+		}()
+
+		rm := func(n gc.Node) error {
+			if n.Type == ResourceSnapshot {
+				if idx := strings.IndexRune(n.Key, '/'); idx > 0 {
+					m.dirtySS[n.Key[:idx]] = struct{}{}
+				}
+			} else if n.Type == ResourceContent {
+				m.dirtyCS = true
+			}
+			return remove(ctx, tx, n)
+		}
+
+		if err := gc.Sweep(marked, nodeC, rm); err != nil {
+			return errors.Wrap(err, "failed to sweep")
+		}
+
+		if scanErr != nil {
+			return errors.Wrap(scanErr, "failed to scan all")
+		}
+
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	m.lastCollection = time.Now()
+
+	if len(m.dirtySS) > 0 {
+		for snapshotterName := range m.dirtySS {
+			log.G(ctx).WithField("snapshotter", snapshotterName).Debug("scheduling snapshotter cleanup")
+			go m.cleanupSnapshotter(snapshotterName)
+		}
+		m.dirtySS = map[string]struct{}{}
+	}
+
+	if m.dirtyCS {
+		log.G(ctx).Debug("scheduling content cleanup")
+		go m.cleanupContent()
+		m.dirtyCS = false
+	}
+
+	return nil
+}
+
+func (m *DB) cleanupSnapshotter(name string) {
+	ctx := context.Background()
+	sn, ok := m.ss[name]
+	if !ok {
+		return
+	}
+
+	err := sn.garbageCollect(ctx)
+	if err != nil {
+		log.G(ctx).WithError(err).WithField("snapshotter", name).Warn("garbage collection failed")
+	}
+}
+
+func (m *DB) cleanupContent() {
+	ctx := context.Background()
+	if m.cs == nil {
+		return
+	}
+
+	err := m.cs.garbageCollect(ctx)
+	if err != nil {
+		log.G(ctx).WithError(err).Warn("content garbage collection failed")
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/gc.go b/vendor/github.com/containerd/containerd/metadata/gc.go
new file mode 100644
index 0000000..8434d69
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/gc.go
@@ -0,0 +1,343 @@
+package metadata
+
+import (
+	"context"
+	"fmt"
+	"strings"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/gc"
+	"github.com/containerd/containerd/log"
+	"github.com/pkg/errors"
+)
+
+const (
+	ResourceUnknown gc.ResourceType = iota
+	ResourceContent
+	ResourceSnapshot
+	ResourceContainer
+	ResourceTask
+)
+
+var (
+	labelGCRoot       = []byte("containerd.io/gc.root")
+	labelGCSnapRef    = []byte("containerd.io/gc.ref.snapshot.")
+	labelGCContentRef = []byte("containerd.io/gc.ref.content")
+)
+
+func scanRoots(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
+	v1bkt := tx.Bucket(bucketKeyVersion)
+	if v1bkt == nil {
+		return nil
+	}
+
+	// iterate through each namespace
+	v1c := v1bkt.Cursor()
+
+	for k, v := v1c.First(); k != nil; k, v = v1c.Next() {
+		if v != nil {
+			continue
+		}
+		nbkt := v1bkt.Bucket(k)
+		ns := string(k)
+
+		ibkt := nbkt.Bucket(bucketKeyObjectImages)
+		if ibkt != nil {
+			if err := ibkt.ForEach(func(k, v []byte) error {
+				if v != nil {
+					return nil
+				}
+
+				target := ibkt.Bucket(k).Bucket(bucketKeyTarget)
+				if target != nil {
+					contentKey := string(target.Get(bucketKeyDigest))
+					select {
+					case nc <- gcnode(ResourceContent, ns, contentKey):
+					case <-ctx.Done():
+						return ctx.Err()
+					}
+				}
+				return sendSnapshotRefs(ns, ibkt.Bucket(k), func(n gc.Node) {
+					select {
+					case nc <- n:
+					case <-ctx.Done():
+					}
+				})
+			}); err != nil {
+				return err
+			}
+		}
+
+		cbkt := nbkt.Bucket(bucketKeyObjectContent)
+		if cbkt != nil {
+			cbkt = cbkt.Bucket(bucketKeyObjectBlob)
+		}
+		if cbkt != nil {
+			if err := cbkt.ForEach(func(k, v []byte) error {
+				if v != nil {
+					return nil
+				}
+				return sendRootRef(ctx, nc, gcnode(ResourceContent, ns, string(k)), cbkt.Bucket(k))
+			}); err != nil {
+				return err
+			}
+		}
+
+		cbkt = nbkt.Bucket(bucketKeyObjectContainers)
+		if cbkt != nil {
+			if err := cbkt.ForEach(func(k, v []byte) error {
+				if v != nil {
+					return nil
+				}
+				snapshotter := string(cbkt.Bucket(k).Get(bucketKeySnapshotter))
+				if snapshotter != "" {
+					ss := string(cbkt.Bucket(k).Get(bucketKeySnapshotKey))
+					select {
+					case nc <- gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", snapshotter, ss)):
+					case <-ctx.Done():
+						return ctx.Err()
+					}
+				}
+
+				// TODO: Send additional snapshot refs through labels
+				return sendSnapshotRefs(ns, cbkt.Bucket(k), func(n gc.Node) {
+					select {
+					case nc <- n:
+					case <-ctx.Done():
+					}
+				})
+			}); err != nil {
+				return err
+			}
+		}
+
+		sbkt := nbkt.Bucket(bucketKeyObjectSnapshots)
+		if sbkt != nil {
+			if err := sbkt.ForEach(func(sk, sv []byte) error {
+				if sv != nil {
+					return nil
+				}
+				snbkt := sbkt.Bucket(sk)
+
+				return snbkt.ForEach(func(k, v []byte) error {
+					if v != nil {
+						return nil
+					}
+
+					return sendRootRef(ctx, nc, gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", sk, k)), snbkt.Bucket(k))
+				})
+			}); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func references(ctx context.Context, tx *bolt.Tx, node gc.Node, fn func(gc.Node)) error {
+	if node.Type == ResourceContent {
+		bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectContent, bucketKeyObjectBlob, []byte(node.Key))
+		if bkt == nil {
+			// Node may be created from dead edge
+			return nil
+		}
+
+		if err := sendSnapshotRefs(node.Namespace, bkt, fn); err != nil {
+			return err
+		}
+		return sendContentRefs(node.Namespace, bkt, fn)
+	} else if node.Type == ResourceSnapshot {
+		parts := strings.SplitN(node.Key, "/", 2)
+		if len(parts) != 2 {
+			return errors.Errorf("invalid snapshot gc key %s", node.Key)
+		}
+		ss := parts[0]
+		name := parts[1]
+
+		bkt := getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectSnapshots, []byte(ss), []byte(name))
+		if bkt == nil {
+			getBucket(tx, bucketKeyVersion, []byte(node.Namespace), bucketKeyObjectSnapshots).ForEach(func(k, v []byte) error {
+				return nil
+			})
+
+			// Node may be created from dead edge
+			return nil
+		}
+
+		if pv := bkt.Get(bucketKeyParent); len(pv) > 0 {
+			fn(gcnode(ResourceSnapshot, node.Namespace, fmt.Sprintf("%s/%s", ss, pv)))
+		}
+
+		return sendSnapshotRefs(node.Namespace, bkt, fn)
+	}
+
+	return nil
+}
+
+func scanAll(ctx context.Context, tx *bolt.Tx, nc chan<- gc.Node) error {
+	v1bkt := tx.Bucket(bucketKeyVersion)
+	if v1bkt == nil {
+		return nil
+	}
+
+	// iterate through each namespace
+	v1c := v1bkt.Cursor()
+
+	for k, v := v1c.First(); k != nil; k, v = v1c.Next() {
+		if v != nil {
+			continue
+		}
+		nbkt := v1bkt.Bucket(k)
+		ns := string(k)
+
+		sbkt := nbkt.Bucket(bucketKeyObjectSnapshots)
+		if sbkt != nil {
+			if err := sbkt.ForEach(func(sk, sv []byte) error {
+				if sv != nil {
+					return nil
+				}
+				snbkt := sbkt.Bucket(sk)
+				return snbkt.ForEach(func(k, v []byte) error {
+					if v != nil {
+						return nil
+					}
+					select {
+					case nc <- gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", sk, k)):
+					case <-ctx.Done():
+						return ctx.Err()
+					}
+					return nil
+				})
+			}); err != nil {
+				return err
+			}
+		}
+
+		cbkt := nbkt.Bucket(bucketKeyObjectContent)
+		if cbkt != nil {
+			cbkt = cbkt.Bucket(bucketKeyObjectBlob)
+		}
+		if cbkt != nil {
+			if err := cbkt.ForEach(func(k, v []byte) error {
+				if v != nil {
+					return nil
+				}
+				select {
+				case nc <- gcnode(ResourceContent, ns, string(k)):
+				case <-ctx.Done():
+					return ctx.Err()
+				}
+				return nil
+			}); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func remove(ctx context.Context, tx *bolt.Tx, node gc.Node) error {
+	v1bkt := tx.Bucket(bucketKeyVersion)
+	if v1bkt == nil {
+		return nil
+	}
+
+	nsbkt := v1bkt.Bucket([]byte(node.Namespace))
+	if nsbkt == nil {
+		return nil
+	}
+
+	switch node.Type {
+	case ResourceContent:
+		cbkt := nsbkt.Bucket(bucketKeyObjectContent)
+		if cbkt != nil {
+			cbkt = cbkt.Bucket(bucketKeyObjectBlob)
+		}
+		if cbkt != nil {
+			log.G(ctx).WithField("key", node.Key).Debug("delete content")
+			return cbkt.DeleteBucket([]byte(node.Key))
+		}
+	case ResourceSnapshot:
+		sbkt := nsbkt.Bucket(bucketKeyObjectSnapshots)
+		if sbkt != nil {
+			parts := strings.SplitN(node.Key, "/", 2)
+			if len(parts) != 2 {
+				return errors.Errorf("invalid snapshot gc key %s", node.Key)
+			}
+			ssbkt := sbkt.Bucket([]byte(parts[0]))
+			if ssbkt != nil {
+				log.G(ctx).WithField("key", parts[1]).WithField("snapshotter", parts[0]).Debug("delete snapshot")
+				return ssbkt.DeleteBucket([]byte(parts[1]))
+			}
+		}
+	}
+
+	return nil
+}
+
+// sendSnapshotRefs sends all snapshot references referred to by the labels in the bkt
+func sendSnapshotRefs(ns string, bkt *bolt.Bucket, fn func(gc.Node)) error {
+	lbkt := bkt.Bucket(bucketKeyObjectLabels)
+	if lbkt != nil {
+		lc := lbkt.Cursor()
+
+		for k, v := lc.Seek(labelGCSnapRef); k != nil && strings.HasPrefix(string(k), string(labelGCSnapRef)); k, v = lc.Next() {
+			snapshotter := string(k[len(labelGCSnapRef):])
+			fn(gcnode(ResourceSnapshot, ns, fmt.Sprintf("%s/%s", snapshotter, v)))
+		}
+	}
+	return nil
+}
+
+// sendContentRefs sends all content references referred to by the labels in the bkt
+func sendContentRefs(ns string, bkt *bolt.Bucket, fn func(gc.Node)) error {
+	lbkt := bkt.Bucket(bucketKeyObjectLabels)
+	if lbkt != nil {
+		lc := lbkt.Cursor()
+
+		labelRef := string(labelGCContentRef)
+		for k, v := lc.Seek(labelGCContentRef); k != nil && strings.HasPrefix(string(k), labelRef); k, v = lc.Next() {
+			if ks := string(k); ks != labelRef {
+				// Allow reference naming, ignore names
+				if ks[len(labelRef)] != '.' {
+					continue
+				}
+			}
+
+			fn(gcnode(ResourceContent, ns, string(v)))
+		}
+	}
+	return nil
+}
+
+func isRootRef(bkt *bolt.Bucket) bool {
+	lbkt := bkt.Bucket(bucketKeyObjectLabels)
+	if lbkt != nil {
+		rv := lbkt.Get(labelGCRoot)
+		if rv != nil {
+			// TODO: interpret rv as a timestamp and skip if expired
+			return true
+		}
+	}
+	return false
+}
+
+func sendRootRef(ctx context.Context, nc chan<- gc.Node, n gc.Node, bkt *bolt.Bucket) error {
+	if isRootRef(bkt) {
+		select {
+		case nc <- n:
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
+	return nil
+}
+
+func gcnode(t gc.ResourceType, ns, key string) gc.Node {
+	return gc.Node{
+		Type:      t,
+		Namespace: ns,
+		Key:       key,
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/images.go b/vendor/github.com/containerd/containerd/metadata/images.go
new file mode 100644
index 0000000..7e5e3c7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/images.go
@@ -0,0 +1,316 @@
+package metadata
+
+import (
+	"context"
+	"encoding/binary"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/filters"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/labels"
+	"github.com/containerd/containerd/metadata/boltutil"
+	"github.com/containerd/containerd/namespaces"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type imageStore struct {
+	tx *bolt.Tx
+}
+
+// NewImageStore returns a store backed by a bolt DB
+func NewImageStore(tx *bolt.Tx) images.Store {
+	return &imageStore{tx: tx}
+}
+
+func (s *imageStore) Get(ctx context.Context, name string) (images.Image, error) {
+	var image images.Image
+
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return images.Image{}, err
+	}
+
+	bkt := getImagesBucket(s.tx, namespace)
+	if bkt == nil {
+		return images.Image{}, errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
+	}
+
+	ibkt := bkt.Bucket([]byte(name))
+	if ibkt == nil {
+		return images.Image{}, errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
+	}
+
+	image.Name = name
+	if err := readImage(&image, ibkt); err != nil {
+		return images.Image{}, errors.Wrapf(err, "image %q", name)
+	}
+
+	return image, nil
+}
+
+func (s *imageStore) List(ctx context.Context, fs ...string) ([]images.Image, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	filter, err := filters.ParseAll(fs...)
+	if err != nil {
+		return nil, errors.Wrapf(errdefs.ErrInvalidArgument, err.Error())
+	}
+
+	bkt := getImagesBucket(s.tx, namespace)
+	if bkt == nil {
+		return nil, nil // empty store
+	}
+
+	var m []images.Image
+	if err := bkt.ForEach(func(k, v []byte) error {
+		var (
+			image = images.Image{
+				Name: string(k),
+			}
+			kbkt = bkt.Bucket(k)
+		)
+
+		if err := readImage(&image, kbkt); err != nil {
+			return err
+		}
+
+		if filter.Match(adaptImage(image)) {
+			m = append(m, image)
+		}
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return m, nil
+}
+
+func (s *imageStore) Create(ctx context.Context, image images.Image) (images.Image, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return images.Image{}, err
+	}
+
+	if image.Name == "" {
+		return images.Image{}, errors.Wrapf(errdefs.ErrInvalidArgument, "image name is required for create")
+	}
+
+	if err := validateImage(&image); err != nil {
+		return images.Image{}, err
+	}
+
+	return image, withImagesBucket(s.tx, namespace, func(bkt *bolt.Bucket) error {
+		ibkt, err := bkt.CreateBucket([]byte(image.Name))
+		if err != nil {
+			if err != bolt.ErrBucketExists {
+				return err
+			}
+
+			return errors.Wrapf(errdefs.ErrAlreadyExists, "image %q", image.Name)
+		}
+
+		image.CreatedAt = time.Now().UTC()
+		image.UpdatedAt = image.CreatedAt
+		return writeImage(ibkt, &image)
+	})
+}
+
+func (s *imageStore) Update(ctx context.Context, image images.Image, fieldpaths ...string) (images.Image, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return images.Image{}, err
+	}
+
+	if image.Name == "" {
+		return images.Image{}, errors.Wrapf(errdefs.ErrInvalidArgument, "image name is required for update")
+	}
+
+	var updated images.Image
+	return updated, withImagesBucket(s.tx, namespace, func(bkt *bolt.Bucket) error {
+		ibkt := bkt.Bucket([]byte(image.Name))
+		if ibkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "image %q", image.Name)
+		}
+
+		if err := readImage(&updated, ibkt); err != nil {
+			return errors.Wrapf(err, "image %q", image.Name)
+		}
+		createdat := updated.CreatedAt
+		updated.Name = image.Name
+
+		if len(fieldpaths) > 0 {
+			for _, path := range fieldpaths {
+				if strings.HasPrefix(path, "labels.") {
+					if updated.Labels == nil {
+						updated.Labels = map[string]string{}
+					}
+
+					key := strings.TrimPrefix(path, "labels.")
+					updated.Labels[key] = image.Labels[key]
+					continue
+				}
+
+				switch path {
+				case "labels":
+					updated.Labels = image.Labels
+				case "target":
+					// NOTE(stevvooe): While we allow setting individual labels, we
+					// only support replacing the target as a unit, since that is
+					// commonly pulled as a unit from other sources. It often doesn't
+					// make sense to modify the size or digest without touching the
+					// mediatype, as well, for example.
+					updated.Target = image.Target
+				default:
+					return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on image %q", path, image.Name)
+				}
+			}
+		} else {
+			updated = image
+		}
+
+		if err := validateImage(&image); err != nil {
+			return err
+		}
+
+		updated.CreatedAt = createdat
+		updated.UpdatedAt = time.Now().UTC()
+		return writeImage(ibkt, &updated)
+	})
+}
+
+func (s *imageStore) Delete(ctx context.Context, name string) error {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	return withImagesBucket(s.tx, namespace, func(bkt *bolt.Bucket) error {
+		err := bkt.DeleteBucket([]byte(name))
+		if err == bolt.ErrBucketNotFound {
+			return errors.Wrapf(errdefs.ErrNotFound, "image %q", name)
+		}
+		return err
+	})
+}
+
+func validateImage(image *images.Image) error {
+	if image.Name == "" {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "image name must not be empty")
+	}
+
+	for k, v := range image.Labels {
+		if err := labels.Validate(k, v); err != nil {
+			return errors.Wrapf(err, "image.Labels")
+		}
+	}
+
+	return validateTarget(&image.Target)
+}
+
+func validateTarget(target *ocispec.Descriptor) error {
+	// NOTE(stevvooe): Only validate fields we actually store.
+
+	if err := target.Digest.Validate(); err != nil {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "Target.Digest %q invalid: %v", target.Digest, err)
+	}
+
+	if target.Size <= 0 {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "Target.Size must be greater than zero")
+	}
+
+	if target.MediaType == "" {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "Target.MediaType must be set")
+	}
+
+	return nil
+}
+
+func readImage(image *images.Image, bkt *bolt.Bucket) error {
+	if err := boltutil.ReadTimestamps(bkt, &image.CreatedAt, &image.UpdatedAt); err != nil {
+		return err
+	}
+
+	labels, err := boltutil.ReadLabels(bkt)
+	if err != nil {
+		return err
+	}
+	image.Labels = labels
+
+	tbkt := bkt.Bucket(bucketKeyTarget)
+	if tbkt == nil {
+		return errors.New("unable to read target bucket")
+	}
+	return tbkt.ForEach(func(k, v []byte) error {
+		if v == nil {
+			return nil // skip it? a bkt maybe?
+		}
+
+		// TODO(stevvooe): This is why we need to use byte values for
+		// keys, rather than full arrays.
+		switch string(k) {
+		case string(bucketKeyDigest):
+			image.Target.Digest = digest.Digest(v)
+		case string(bucketKeyMediaType):
+			image.Target.MediaType = string(v)
+		case string(bucketKeySize):
+			image.Target.Size, _ = binary.Varint(v)
+		}
+
+		return nil
+	})
+}
+
+func writeImage(bkt *bolt.Bucket, image *images.Image) error {
+	if err := boltutil.WriteTimestamps(bkt, image.CreatedAt, image.UpdatedAt); err != nil {
+		return err
+	}
+
+	if err := boltutil.WriteLabels(bkt, image.Labels); err != nil {
+		return errors.Wrapf(err, "writing labels for image %v", image.Name)
+	}
+
+	// write the target bucket
+	tbkt, err := bkt.CreateBucketIfNotExists([]byte(bucketKeyTarget))
+	if err != nil {
+		return err
+	}
+
+	sizeEncoded, err := encodeInt(image.Target.Size)
+	if err != nil {
+		return err
+	}
+
+	for _, v := range [][2][]byte{
+		{bucketKeyDigest, []byte(image.Target.Digest)},
+		{bucketKeyMediaType, []byte(image.Target.MediaType)},
+		{bucketKeySize, sizeEncoded},
+	} {
+		if err := tbkt.Put(v[0], v[1]); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func encodeInt(i int64) ([]byte, error) {
+	var (
+		buf      [binary.MaxVarintLen64]byte
+		iEncoded = buf[:]
+	)
+	iEncoded = iEncoded[:binary.PutVarint(iEncoded, i)]
+
+	if len(iEncoded) == 0 {
+		return nil, fmt.Errorf("failed encoding integer = %v", i)
+	}
+	return iEncoded, nil
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/migrations.go b/vendor/github.com/containerd/containerd/metadata/migrations.go
new file mode 100644
index 0000000..997aee2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/migrations.go
@@ -0,0 +1,85 @@
+package metadata
+
+import "github.com/boltdb/bolt"
+
+type migration struct {
+	schema  string
+	version int
+	migrate func(*bolt.Tx) error
+}
+
+// migrations stores the list of database migrations
+// for each update to the database schema. The migrations
+// array MUST be ordered by version from least to greatest.
+// The last entry in the array should correspond to the
+// schemaVersion and dbVersion constants.
+// A migration test MUST be added for each migration in
+// the array.
+// The migrate function can safely assume the version
+// of the data it is migrating from is the previous version
+// of the database.
+var migrations = []migration{
+	{
+		schema:  "v1",
+		version: 1,
+		migrate: addChildLinks,
+	},
+}
+
+// addChildLinks Adds children key to the snapshotters to enforce snapshot
+// entries cannot be removed which have children
+func addChildLinks(tx *bolt.Tx) error {
+	v1bkt := tx.Bucket(bucketKeyVersion)
+	if v1bkt == nil {
+		return nil
+	}
+
+	// iterate through each namespace
+	v1c := v1bkt.Cursor()
+
+	for k, v := v1c.First(); k != nil; k, v = v1c.Next() {
+		if v != nil {
+			continue
+		}
+		nbkt := v1bkt.Bucket(k)
+
+		sbkt := nbkt.Bucket(bucketKeyObjectSnapshots)
+		if sbkt != nil {
+			// Iterate through each snapshotter
+			if err := sbkt.ForEach(func(sk, sv []byte) error {
+				if sv != nil {
+					return nil
+				}
+				snbkt := sbkt.Bucket(sk)
+
+				// Iterate through each snapshot
+				return snbkt.ForEach(func(k, v []byte) error {
+					if v != nil {
+						return nil
+					}
+					parent := snbkt.Bucket(k).Get(bucketKeyParent)
+					if len(parent) > 0 {
+						pbkt := snbkt.Bucket(parent)
+						if pbkt == nil {
+							// Not enforcing consistency during migration, skip
+							return nil
+						}
+						cbkt, err := pbkt.CreateBucketIfNotExists(bucketKeyChildren)
+						if err != nil {
+							return err
+						}
+						if err := cbkt.Put(k, nil); err != nil {
+							return err
+						}
+					}
+
+					return nil
+				})
+			}); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/namespaces.go b/vendor/github.com/containerd/containerd/metadata/namespaces.go
new file mode 100644
index 0000000..4b4c4e5
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/namespaces.go
@@ -0,0 +1,163 @@
+package metadata
+
+import (
+	"context"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/errdefs"
+	l "github.com/containerd/containerd/labels"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/pkg/errors"
+)
+
+type namespaceStore struct {
+	tx *bolt.Tx
+}
+
+// NewNamespaceStore returns a store backed by a bolt DB
+func NewNamespaceStore(tx *bolt.Tx) namespaces.Store {
+	return &namespaceStore{tx: tx}
+}
+
+func (s *namespaceStore) Create(ctx context.Context, namespace string, labels map[string]string) error {
+	topbkt, err := createBucketIfNotExists(s.tx, bucketKeyVersion)
+	if err != nil {
+		return err
+	}
+
+	if err := namespaces.Validate(namespace); err != nil {
+		return err
+	}
+
+	for k, v := range labels {
+		if err := l.Validate(k, v); err != nil {
+			return errors.Wrapf(err, "namespace.Labels")
+		}
+	}
+
+	// provides the already exists error.
+	bkt, err := topbkt.CreateBucket([]byte(namespace))
+	if err != nil {
+		if err == bolt.ErrBucketExists {
+			return errors.Wrapf(errdefs.ErrAlreadyExists, "namespace %q", namespace)
+		}
+
+		return err
+	}
+
+	lbkt, err := bkt.CreateBucketIfNotExists(bucketKeyObjectLabels)
+	if err != nil {
+		return err
+	}
+
+	for k, v := range labels {
+		if err := lbkt.Put([]byte(k), []byte(v)); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *namespaceStore) Labels(ctx context.Context, namespace string) (map[string]string, error) {
+	labels := map[string]string{}
+
+	bkt := getNamespaceLabelsBucket(s.tx, namespace)
+	if bkt == nil {
+		return labels, nil
+	}
+
+	if err := bkt.ForEach(func(k, v []byte) error {
+		labels[string(k)] = string(v)
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return labels, nil
+}
+
+func (s *namespaceStore) SetLabel(ctx context.Context, namespace, key, value string) error {
+	if err := l.Validate(key, value); err != nil {
+		return errors.Wrapf(err, "namespace.Labels")
+	}
+
+	return withNamespacesLabelsBucket(s.tx, namespace, func(bkt *bolt.Bucket) error {
+		if value == "" {
+			return bkt.Delete([]byte(key))
+		}
+
+		return bkt.Put([]byte(key), []byte(value))
+	})
+
+}
+
+func (s *namespaceStore) List(ctx context.Context) ([]string, error) {
+	bkt := getBucket(s.tx, bucketKeyVersion)
+	if bkt == nil {
+		return nil, nil // no namespaces!
+	}
+
+	var namespaces []string
+	if err := bkt.ForEach(func(k, v []byte) error {
+		if v != nil {
+			return nil // not a bucket
+		}
+
+		namespaces = append(namespaces, string(k))
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return namespaces, nil
+}
+
+func (s *namespaceStore) Delete(ctx context.Context, namespace string) error {
+	bkt := getBucket(s.tx, bucketKeyVersion)
+	if empty, err := s.namespaceEmpty(ctx, namespace); err != nil {
+		return err
+	} else if !empty {
+		return errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace %q must be empty", namespace)
+	}
+
+	if err := bkt.DeleteBucket([]byte(namespace)); err != nil {
+		if err == bolt.ErrBucketNotFound {
+			return errors.Wrapf(errdefs.ErrNotFound, "namespace %q", namespace)
+		}
+
+		return err
+	}
+
+	return nil
+}
+
+func (s *namespaceStore) namespaceEmpty(ctx context.Context, namespace string) (bool, error) {
+	ctx = namespaces.WithNamespace(ctx, namespace)
+
+	// need to check the various object stores.
+
+	imageStore := NewImageStore(s.tx)
+	images, err := imageStore.List(ctx)
+	if err != nil {
+		return false, err
+	}
+	if len(images) > 0 {
+		return false, nil
+	}
+
+	containerStore := NewContainerStore(s.tx)
+	containers, err := containerStore.List(ctx)
+	if err != nil {
+		return false, err
+	}
+
+	if len(containers) > 0 {
+		return false, nil
+	}
+
+	// TODO(stevvooe): Need to add check for content store, as well. Still need
+	// to make content store namespace aware.
+
+	return true, nil
+}
diff --git a/vendor/github.com/containerd/containerd/metadata/snapshot.go b/vendor/github.com/containerd/containerd/metadata/snapshot.go
new file mode 100644
index 0000000..ad38e59
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/metadata/snapshot.go
@@ -0,0 +1,722 @@
+package metadata
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/boltdb/bolt"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/labels"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/metadata/boltutil"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/snapshot"
+	"github.com/pkg/errors"
+)
+
+type snapshotter struct {
+	snapshot.Snapshotter
+	name string
+	db   *DB
+	l    sync.RWMutex
+}
+
+// newSnapshotter returns a new Snapshotter which namespaces the given snapshot
+// using the provided name and database.
+func newSnapshotter(db *DB, name string, sn snapshot.Snapshotter) *snapshotter {
+	return &snapshotter{
+		Snapshotter: sn,
+		name:        name,
+		db:          db,
+	}
+}
+
+func createKey(id uint64, namespace, key string) string {
+	return fmt.Sprintf("%s/%d/%s", namespace, id, key)
+}
+
+func trimKey(key string) string {
+	parts := strings.SplitN(key, "/", 3)
+	if len(parts) < 3 {
+		return ""
+	}
+	return parts[2]
+}
+
+func getKey(tx *bolt.Tx, ns, name, key string) string {
+	bkt := getSnapshotterBucket(tx, ns, name)
+	if bkt == nil {
+		return ""
+	}
+	bkt = bkt.Bucket([]byte(key))
+	if bkt == nil {
+		return ""
+	}
+	v := bkt.Get(bucketKeyName)
+	if len(v) == 0 {
+		return ""
+	}
+	return string(v)
+}
+
+func (s *snapshotter) resolveKey(ctx context.Context, key string) (string, error) {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	var id string
+	if err := view(ctx, s.db, func(tx *bolt.Tx) error {
+		id = getKey(tx, ns, s.name, key)
+		if id == "" {
+			return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
+		}
+		return nil
+	}); err != nil {
+		return "", err
+	}
+
+	return id, nil
+}
+
+func (s *snapshotter) Stat(ctx context.Context, key string) (snapshot.Info, error) {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return snapshot.Info{}, err
+	}
+
+	var (
+		bkey  string
+		local = snapshot.Info{
+			Name: key,
+		}
+	)
+	if err := view(ctx, s.db, func(tx *bolt.Tx) error {
+		bkt := getSnapshotterBucket(tx, ns, s.name)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
+		}
+		sbkt := bkt.Bucket([]byte(key))
+		if sbkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
+		}
+		local.Labels, err = boltutil.ReadLabels(sbkt)
+		if err != nil {
+			return errors.Wrap(err, "failed to read labels")
+		}
+		if err := boltutil.ReadTimestamps(sbkt, &local.Created, &local.Updated); err != nil {
+			return errors.Wrap(err, "failed to read timestamps")
+		}
+		bkey = string(sbkt.Get(bucketKeyName))
+		local.Parent = string(sbkt.Get(bucketKeyParent))
+
+		return nil
+	}); err != nil {
+		return snapshot.Info{}, err
+	}
+
+	info, err := s.Snapshotter.Stat(ctx, bkey)
+	if err != nil {
+		return snapshot.Info{}, err
+	}
+
+	return overlayInfo(info, local), nil
+}
+
+func (s *snapshotter) Update(ctx context.Context, info snapshot.Info, fieldpaths ...string) (snapshot.Info, error) {
+	s.l.RLock()
+	defer s.l.RUnlock()
+
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return snapshot.Info{}, err
+	}
+
+	if info.Name == "" {
+		return snapshot.Info{}, errors.Wrap(errdefs.ErrInvalidArgument, "")
+	}
+
+	var (
+		bkey  string
+		local = snapshot.Info{
+			Name: info.Name,
+		}
+	)
+	if err := update(ctx, s.db, func(tx *bolt.Tx) error {
+		bkt := getSnapshotterBucket(tx, ns, s.name)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", info.Name)
+		}
+		sbkt := bkt.Bucket([]byte(info.Name))
+		if sbkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", info.Name)
+		}
+
+		local.Labels, err = boltutil.ReadLabels(sbkt)
+		if err != nil {
+			return errors.Wrap(err, "failed to read labels")
+		}
+		if err := boltutil.ReadTimestamps(sbkt, &local.Created, &local.Updated); err != nil {
+			return errors.Wrap(err, "failed to read timestamps")
+		}
+
+		// Handle field updates
+		if len(fieldpaths) > 0 {
+			for _, path := range fieldpaths {
+				if strings.HasPrefix(path, "labels.") {
+					if local.Labels == nil {
+						local.Labels = map[string]string{}
+					}
+
+					key := strings.TrimPrefix(path, "labels.")
+					local.Labels[key] = info.Labels[key]
+					continue
+				}
+
+				switch path {
+				case "labels":
+					local.Labels = info.Labels
+				default:
+					return errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on snapshot %q", path, info.Name)
+				}
+			}
+		} else {
+			local.Labels = info.Labels
+		}
+		if err := validateSnapshot(&local); err != nil {
+			return err
+		}
+		local.Updated = time.Now().UTC()
+
+		if err := boltutil.WriteTimestamps(sbkt, local.Created, local.Updated); err != nil {
+			return errors.Wrap(err, "failed to read timestamps")
+		}
+		if err := boltutil.WriteLabels(sbkt, local.Labels); err != nil {
+			return errors.Wrap(err, "failed to read labels")
+		}
+		bkey = string(sbkt.Get(bucketKeyName))
+		local.Parent = string(sbkt.Get(bucketKeyParent))
+
+		return nil
+	}); err != nil {
+		return snapshot.Info{}, err
+	}
+
+	info, err = s.Snapshotter.Stat(ctx, bkey)
+	if err != nil {
+		return snapshot.Info{}, err
+	}
+
+	return overlayInfo(info, local), nil
+}
+
+func overlayInfo(info, overlay snapshot.Info) snapshot.Info {
+	// Merge info
+	info.Name = overlay.Name
+	info.Created = overlay.Created
+	info.Updated = overlay.Updated
+	info.Parent = overlay.Parent
+	if info.Labels == nil {
+		info.Labels = overlay.Labels
+	} else {
+		for k, v := range overlay.Labels {
+			overlay.Labels[k] = v
+		}
+	}
+	return info
+}
+
+func (s *snapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, error) {
+	bkey, err := s.resolveKey(ctx, key)
+	if err != nil {
+		return snapshot.Usage{}, err
+	}
+	return s.Snapshotter.Usage(ctx, bkey)
+}
+
+func (s *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
+	bkey, err := s.resolveKey(ctx, key)
+	if err != nil {
+		return nil, err
+	}
+	return s.Snapshotter.Mounts(ctx, bkey)
+}
+
+func (s *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) {
+	return s.createSnapshot(ctx, key, parent, false, opts)
+}
+
+func (s *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) {
+	return s.createSnapshot(ctx, key, parent, true, opts)
+}
+
+func (s *snapshotter) createSnapshot(ctx context.Context, key, parent string, readonly bool, opts []snapshot.Opt) ([]mount.Mount, error) {
+	s.l.RLock()
+	defer s.l.RUnlock()
+
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	var base snapshot.Info
+	for _, opt := range opts {
+		if err := opt(&base); err != nil {
+			return nil, err
+		}
+	}
+
+	if err := validateSnapshot(&base); err != nil {
+		return nil, err
+	}
+
+	var m []mount.Mount
+	if err := update(ctx, s.db, func(tx *bolt.Tx) error {
+		bkt, err := createSnapshotterBucket(tx, ns, s.name)
+		if err != nil {
+			return err
+		}
+
+		bbkt, err := bkt.CreateBucket([]byte(key))
+		if err != nil {
+			if err == bolt.ErrBucketExists {
+				err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v already exists", key)
+			}
+			return err
+		}
+		var bparent string
+		if parent != "" {
+			pbkt := bkt.Bucket([]byte(parent))
+			if pbkt == nil {
+				return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", parent)
+			}
+			bparent = string(pbkt.Get(bucketKeyName))
+
+			cbkt, err := pbkt.CreateBucketIfNotExists(bucketKeyChildren)
+			if err != nil {
+				return err
+			}
+			if err := cbkt.Put([]byte(key), nil); err != nil {
+				return err
+			}
+
+			if err := bbkt.Put(bucketKeyParent, []byte(parent)); err != nil {
+				return err
+			}
+		}
+
+		sid, err := bkt.NextSequence()
+		if err != nil {
+			return err
+		}
+		bkey := createKey(sid, ns, key)
+		if err := bbkt.Put(bucketKeyName, []byte(bkey)); err != nil {
+			return err
+		}
+
+		ts := time.Now().UTC()
+		if err := boltutil.WriteTimestamps(bbkt, ts, ts); err != nil {
+			return err
+		}
+		if err := boltutil.WriteLabels(bbkt, base.Labels); err != nil {
+			return err
+		}
+
+		// TODO: Consider doing this outside of transaction to lessen
+		// metadata lock time
+		if readonly {
+			m, err = s.Snapshotter.View(ctx, bkey, bparent)
+		} else {
+			m, err = s.Snapshotter.Prepare(ctx, bkey, bparent)
+		}
+		return err
+	}); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (s *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshot.Opt) error {
+	s.l.RLock()
+	defer s.l.RUnlock()
+
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	var base snapshot.Info
+	for _, opt := range opts {
+		if err := opt(&base); err != nil {
+			return err
+		}
+	}
+
+	if err := validateSnapshot(&base); err != nil {
+		return err
+	}
+
+	return update(ctx, s.db, func(tx *bolt.Tx) error {
+		bkt := getSnapshotterBucket(tx, ns, s.name)
+		if bkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
+		}
+
+		bbkt, err := bkt.CreateBucket([]byte(name))
+		if err != nil {
+			if err == bolt.ErrBucketExists {
+				err = errors.Wrapf(errdefs.ErrAlreadyExists, "snapshot %v already exists", name)
+			}
+			return err
+		}
+
+		obkt := bkt.Bucket([]byte(key))
+		if obkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
+		}
+
+		bkey := string(obkt.Get(bucketKeyName))
+
+		sid, err := bkt.NextSequence()
+		if err != nil {
+			return err
+		}
+
+		nameKey := createKey(sid, ns, name)
+
+		if err := bbkt.Put(bucketKeyName, []byte(nameKey)); err != nil {
+			return err
+		}
+
+		parent := obkt.Get(bucketKeyParent)
+		if len(parent) > 0 {
+			pbkt := bkt.Bucket(parent)
+			if pbkt == nil {
+				return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", string(parent))
+			}
+
+			cbkt, err := pbkt.CreateBucketIfNotExists(bucketKeyChildren)
+			if err != nil {
+				return err
+			}
+			if err := cbkt.Delete([]byte(key)); err != nil {
+				return err
+			}
+			if err := cbkt.Put([]byte(name), nil); err != nil {
+				return err
+			}
+
+			if err := bbkt.Put(bucketKeyParent, parent); err != nil {
+				return err
+			}
+		}
+		ts := time.Now().UTC()
+		if err := boltutil.WriteTimestamps(bbkt, ts, ts); err != nil {
+			return err
+		}
+		if err := boltutil.WriteLabels(bbkt, base.Labels); err != nil {
+			return err
+		}
+		if err := bkt.DeleteBucket([]byte(key)); err != nil {
+			return err
+		}
+
+		// TODO: Consider doing this outside of transaction to lessen
+		// metadata lock time
+		return s.Snapshotter.Commit(ctx, nameKey, bkey)
+	})
+
+}
+
+func (s *snapshotter) Remove(ctx context.Context, key string) error {
+	s.l.RLock()
+	defer s.l.RUnlock()
+
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	return update(ctx, s.db, func(tx *bolt.Tx) error {
+		var sbkt *bolt.Bucket
+		bkt := getSnapshotterBucket(tx, ns, s.name)
+		if bkt != nil {
+			sbkt = bkt.Bucket([]byte(key))
+		}
+		if sbkt == nil {
+			return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v does not exist", key)
+		}
+
+		cbkt := sbkt.Bucket(bucketKeyChildren)
+		if cbkt != nil {
+			if child, _ := cbkt.Cursor().First(); child != nil {
+				return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot remove snapshot with child")
+			}
+		}
+
+		parent := sbkt.Get(bucketKeyParent)
+		if len(parent) > 0 {
+			pbkt := bkt.Bucket(parent)
+			if pbkt == nil {
+				return errors.Wrapf(errdefs.ErrNotFound, "parent snapshot %v does not exist", string(parent))
+			}
+			cbkt := pbkt.Bucket(bucketKeyChildren)
+			if cbkt != nil {
+				if err := cbkt.Delete([]byte(key)); err != nil {
+					return errors.Wrap(err, "failed to remove child link")
+				}
+			}
+		}
+
+		if err := bkt.DeleteBucket([]byte(key)); err != nil {
+			return err
+		}
+
+		// Mark snapshotter as dirty for triggering garbage collection
+		s.db.dirtyL.Lock()
+		s.db.dirtySS[s.name] = struct{}{}
+		s.db.dirtyL.Unlock()
+
+		return nil
+	})
+}
+
+type infoPair struct {
+	bkey string
+	info snapshot.Info
+}
+
+func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapshot.Info) error) error {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+
+	var (
+		batchSize = 100
+		pairs     = []infoPair{}
+		lastKey   string
+	)
+
+	for {
+		if err := view(ctx, s.db, func(tx *bolt.Tx) error {
+			bkt := getSnapshotterBucket(tx, ns, s.name)
+			if bkt == nil {
+				return nil
+			}
+
+			c := bkt.Cursor()
+
+			var k, v []byte
+			if lastKey == "" {
+				k, v = c.First()
+			} else {
+				k, v = c.Seek([]byte(lastKey))
+			}
+
+			for k != nil {
+				if v == nil {
+					if len(pairs) >= batchSize {
+						break
+					}
+					sbkt := bkt.Bucket(k)
+
+					pair := infoPair{
+						bkey: string(sbkt.Get(bucketKeyName)),
+						info: snapshot.Info{
+							Name:   string(k),
+							Parent: string(sbkt.Get(bucketKeyParent)),
+						},
+					}
+
+					err := boltutil.ReadTimestamps(sbkt, &pair.info.Created, &pair.info.Updated)
+					if err != nil {
+						return err
+					}
+					pair.info.Labels, err = boltutil.ReadLabels(sbkt)
+					if err != nil {
+						return err
+					}
+
+					pairs = append(pairs, pair)
+				}
+
+				k, v = c.Next()
+			}
+
+			lastKey = string(k)
+
+			return nil
+		}); err != nil {
+			return err
+		}
+
+		for _, pair := range pairs {
+			info, err := s.Snapshotter.Stat(ctx, pair.bkey)
+			if err != nil {
+				if errdefs.IsNotFound(err) {
+					continue
+				}
+				return err
+			}
+
+			if err := fn(ctx, overlayInfo(info, pair.info)); err != nil {
+				return err
+			}
+		}
+
+		if lastKey == "" {
+			break
+		}
+
+		pairs = pairs[:0]
+
+	}
+
+	return nil
+}
+
+func validateSnapshot(info *snapshot.Info) error {
+	for k, v := range info.Labels {
+		if err := labels.Validate(k, v); err != nil {
+			return errors.Wrapf(err, "info.Labels")
+		}
+	}
+
+	return nil
+}
+
+func (s *snapshotter) garbageCollect(ctx context.Context) error {
+	logger := log.G(ctx).WithField("snapshotter", s.name)
+	lt1 := time.Now()
+	s.l.Lock()
+	defer func() {
+		s.l.Unlock()
+		logger.WithField("t", time.Now().Sub(lt1)).Debugf("garbage collected")
+	}()
+
+	seen := map[string]struct{}{}
+	if err := s.db.View(func(tx *bolt.Tx) error {
+		v1bkt := tx.Bucket(bucketKeyVersion)
+		if v1bkt == nil {
+			return nil
+		}
+
+		// iterate through each namespace
+		v1c := v1bkt.Cursor()
+
+		for k, v := v1c.First(); k != nil; k, v = v1c.Next() {
+			if v != nil {
+				continue
+			}
+
+			sbkt := v1bkt.Bucket(k).Bucket(bucketKeyObjectSnapshots)
+			if sbkt == nil {
+				continue
+			}
+
+			// Load specific snapshotter
+			ssbkt := sbkt.Bucket([]byte(s.name))
+			if ssbkt == nil {
+				continue
+			}
+
+			if err := ssbkt.ForEach(func(sk, sv []byte) error {
+				if sv == nil {
+					bkey := ssbkt.Bucket(sk).Get(bucketKeyName)
+					if len(bkey) > 0 {
+						seen[string(bkey)] = struct{}{}
+					}
+				}
+				return nil
+			}); err != nil {
+				return err
+			}
+		}
+
+		return nil
+	}); err != nil {
+		return err
+	}
+
+	roots, err := s.walkTree(ctx, seen)
+	if err != nil {
+		return err
+	}
+
+	// TODO: Unlock before prune (once nodes are fully unavailable)
+
+	for _, node := range roots {
+		if err := s.pruneBranch(ctx, node); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+type treeNode struct {
+	info     snapshot.Info
+	remove   bool
+	children []*treeNode
+}
+
+func (s *snapshotter) walkTree(ctx context.Context, seen map[string]struct{}) ([]*treeNode, error) {
+	roots := []*treeNode{}
+	nodes := map[string]*treeNode{}
+
+	if err := s.Snapshotter.Walk(ctx, func(ctx context.Context, info snapshot.Info) error {
+		_, isSeen := seen[info.Name]
+		node, ok := nodes[info.Name]
+		if !ok {
+			node = &treeNode{}
+			nodes[info.Name] = node
+		}
+
+		node.remove = !isSeen
+		node.info = info
+
+		if info.Parent == "" {
+			roots = append(roots, node)
+		} else {
+			parent, ok := nodes[info.Parent]
+			if !ok {
+				parent = &treeNode{}
+				nodes[info.Parent] = parent
+			}
+			parent.children = append(parent.children, node)
+		}
+
+		return nil
+	}); err != nil {
+		return nil, err
+	}
+
+	return roots, nil
+}
+
+func (s *snapshotter) pruneBranch(ctx context.Context, node *treeNode) error {
+	for _, child := range node.children {
+		if err := s.pruneBranch(ctx, child); err != nil {
+			return err
+		}
+	}
+
+	if node.remove {
+		logger := log.G(ctx).WithField("snapshotter", s.name)
+		if err := s.Snapshotter.Remove(ctx, node.info.Name); err != nil {
+			if !errdefs.IsFailedPrecondition(err) {
+				return err
+			}
+			logger.WithError(err).WithField("key", node.info.Name).Warnf("snapshot removal failed")
+		} else {
+			logger.WithField("key", node.info.Name).Debug("removed snapshot")
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/mount/lookup_unix.go b/vendor/github.com/containerd/containerd/mount/lookup_unix.go
new file mode 100644
index 0000000..df9625a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/lookup_unix.go
@@ -0,0 +1,46 @@
+// +build !windows
+
+package mount
+
+import (
+	"fmt"
+	"path/filepath"
+	"sort"
+	"strings"
+	"syscall"
+
+	"github.com/pkg/errors"
+)
+
+// Lookup returns the mount info that corresponds to the path.
+func Lookup(dir string) (Info, error) {
+	var dirStat syscall.Stat_t
+	dir = filepath.Clean(dir)
+	if err := syscall.Stat(dir, &dirStat); err != nil {
+		return Info{}, errors.Wrapf(err, "failed to access %q", dir)
+	}
+
+	mounts, err := Self()
+	if err != nil {
+		return Info{}, err
+	}
+
+	// Sort descending order by Info.Mountpoint
+	sort.Slice(mounts, func(i, j int) bool {
+		return mounts[j].Mountpoint < mounts[i].Mountpoint
+	})
+	for _, m := range mounts {
+		// Note that m.{Major, Minor} are generally unreliable for our purpose here
+		// https://www.spinics.net/lists/linux-btrfs/msg58908.html
+		var st syscall.Stat_t
+		if err := syscall.Stat(m.Mountpoint, &st); err != nil {
+			// may fail; ignore err
+			continue
+		}
+		if st.Dev == dirStat.Dev && strings.HasPrefix(dir, m.Mountpoint) {
+			return m, nil
+		}
+	}
+
+	return Info{}, fmt.Errorf("failed to find the mount info for %q", dir)
+}
diff --git a/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go b/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go
new file mode 100644
index 0000000..e5f84e7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/lookup_unsupported.go
@@ -0,0 +1,13 @@
+// +build windows
+
+package mount
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// Lookup returns the mount info that corresponds to the path.
+func Lookup(dir string) (Info, error) {
+	return Info{}, fmt.Errorf("mount.Lookup is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mount.go b/vendor/github.com/containerd/containerd/mount/mount.go
new file mode 100644
index 0000000..94c2c9f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mount.go
@@ -0,0 +1,24 @@
+package mount
+
+// Mount is the lingua franca of containerd. A mount represents a
+// serialized mount syscall. Components either emit or consume mounts.
+type Mount struct {
+	// Type specifies the host-specific mount type.
+	Type string
+	// Source specifies where to mount from. Depending on the host system, this
+	// can be a source path or device.
+	Source string
+	// Options contains zero or more fstab-style mount options. Typically,
+	// these are platform specific.
+	Options []string
+}
+
+// All mounts all the provided mounts to the provided target
+func All(mounts []Mount, target string) error {
+	for _, m := range mounts {
+		if err := m.Mount(target); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mount_linux.go b/vendor/github.com/containerd/containerd/mount/mount_linux.go
new file mode 100644
index 0000000..474792d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mount_linux.go
@@ -0,0 +1,120 @@
+package mount
+
+import (
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+// Mount to the provided target path
+func (m *Mount) Mount(target string) error {
+	flags, data := parseMountOptions(m.Options)
+
+	// propagation types.
+	const ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE
+
+	// Ensure propagation type change flags aren't included in other calls.
+	oflags := flags &^ ptypes
+
+	// In the case of remounting with changed data (data != ""), need to call mount (moby/moby#34077).
+	if flags&unix.MS_REMOUNT == 0 || data != "" {
+		// Initial call applying all non-propagation flags for mount
+		// or remount with changed data
+		if err := unix.Mount(m.Source, target, m.Type, uintptr(oflags), data); err != nil {
+			return err
+		}
+	}
+
+	if flags&ptypes != 0 {
+		// Change the propagation type.
+		const pflags = ptypes | unix.MS_REC | unix.MS_SILENT
+		if err := unix.Mount("", target, "", uintptr(flags&pflags), ""); err != nil {
+			return err
+		}
+	}
+
+	const broflags = unix.MS_BIND | unix.MS_RDONLY
+	if oflags&broflags == broflags {
+		// Remount the bind to apply read only.
+		return unix.Mount("", target, "", uintptr(oflags|unix.MS_REMOUNT), "")
+	}
+	return nil
+}
+
+// Unmount the provided mount path with the flags
+func Unmount(mount string, flags int) error {
+	return unix.Unmount(mount, flags)
+}
+
+// UnmountAll repeatedly unmounts the given mount point until there
+// are no mounts remaining (EINVAL is returned by unmount), which is
+// useful for undoing a stack of mounts on the same mount point.
+func UnmountAll(mount string, flags int) error {
+	for {
+		if err := Unmount(mount, flags); err != nil {
+			// EINVAL is returned if the target is not a
+			// mount point, indicating that we are
+			// done. It can also indicate a few other
+			// things (such as invalid flags) which we
+			// unfortunately end up squelching here too.
+			if err == unix.EINVAL {
+				return nil
+			}
+			return err
+		}
+	}
+}
+
+// parseMountOptions takes fstab style mount options and parses them for
+// use with a standard mount() syscall
+func parseMountOptions(options []string) (int, string) {
+	var (
+		flag int
+		data []string
+	)
+	flags := map[string]struct {
+		clear bool
+		flag  int
+	}{
+		"async":         {true, unix.MS_SYNCHRONOUS},
+		"atime":         {true, unix.MS_NOATIME},
+		"bind":          {false, unix.MS_BIND},
+		"defaults":      {false, 0},
+		"dev":           {true, unix.MS_NODEV},
+		"diratime":      {true, unix.MS_NODIRATIME},
+		"dirsync":       {false, unix.MS_DIRSYNC},
+		"exec":          {true, unix.MS_NOEXEC},
+		"mand":          {false, unix.MS_MANDLOCK},
+		"noatime":       {false, unix.MS_NOATIME},
+		"nodev":         {false, unix.MS_NODEV},
+		"nodiratime":    {false, unix.MS_NODIRATIME},
+		"noexec":        {false, unix.MS_NOEXEC},
+		"nomand":        {true, unix.MS_MANDLOCK},
+		"norelatime":    {true, unix.MS_RELATIME},
+		"nostrictatime": {true, unix.MS_STRICTATIME},
+		"nosuid":        {false, unix.MS_NOSUID},
+		"rbind":         {false, unix.MS_BIND | unix.MS_REC},
+		"relatime":      {false, unix.MS_RELATIME},
+		"remount":       {false, unix.MS_REMOUNT},
+		"ro":            {false, unix.MS_RDONLY},
+		"rw":            {true, unix.MS_RDONLY},
+		"strictatime":   {false, unix.MS_STRICTATIME},
+		"suid":          {true, unix.MS_NOSUID},
+		"sync":          {false, unix.MS_SYNCHRONOUS},
+	}
+	for _, o := range options {
+		// If the option does not exist in the flags table or the flag
+		// is not supported on the platform,
+		// then it is a data value for a specific fs type
+		if f, exists := flags[o]; exists && f.flag != 0 {
+			if f.clear {
+				flag &^= f.flag
+			} else {
+				flag |= f.flag
+			}
+		} else {
+			data = append(data, o)
+		}
+	}
+	return flag, strings.Join(data, ",")
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mount_solaris.go b/vendor/github.com/containerd/containerd/mount/mount_solaris.go
new file mode 100644
index 0000000..3b6c35d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mount_solaris.go
@@ -0,0 +1,83 @@
+package mount
+
+// On Solaris we can't invoke the mount system call directly.  First,
+// the mount system call takes more than 6 arguments, and go doesn't
+// support invoking system calls that take more than 6 arguments.  Past
+// that, the mount system call is a private interface.  For example,
+// the arguments and data structures passed to the kernel to create an
+// nfs mount are private and can change at any time.  The only public
+// and stable interface for creating mounts on Solaris is the mount.8
+// command, so we'll invoke that here.
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"os/exec"
+	"strings"
+
+	"golang.org/x/sys/unix"
+)
+
+const (
+	mountCmd = "/usr/sbin/mount"
+)
+
+func doMount(arg ...string) error {
+	cmd := exec.Command(mountCmd, arg...)
+
+	/* Setup Stdin, Stdout, and Stderr */
+	stderr := new(bytes.Buffer)
+	cmd.Stdin = nil
+	cmd.Stdout = nil
+	cmd.Stderr = stderr
+
+	/*
+	 * Run the command.  If the command fails create a new error
+	 * object to return that includes stderr output.
+	 */
+	err := cmd.Start()
+	if err != nil {
+		return err
+	}
+	err = cmd.Wait()
+	if err != nil {
+		return errors.New(fmt.Sprintf("%v: %s", err, stderr.String()))
+	}
+	return nil
+}
+
+func (m *Mount) Mount(target string) error {
+	var err error
+
+	if len(m.Options) == 0 {
+		err = doMount("-F", m.Type, m.Source, target)
+	} else {
+		err = doMount("-F", m.Type, "-o", strings.Join(m.Options, ","),
+			m.Source, target)
+	}
+	return err
+}
+
+func Unmount(mount string, flags int) error {
+	return unix.Unmount(mount, flags)
+}
+
+// UnmountAll repeatedly unmounts the given mount point until there
+// are no mounts remaining (EINVAL is returned by unmount), which is
+// useful for undoing a stack of mounts on the same mount point.
+func UnmountAll(mount string, flags int) error {
+	for {
+		if err := Unmount(mount, flags); err != nil {
+			// EINVAL is returned if the target is not a
+			// mount point, indicating that we are
+			// done. It can also indicate a few other
+			// things (such as invalid flags) which we
+			// unfortunately end up squelching here too.
+			if err == unix.EINVAL {
+				return nil
+			}
+			return err
+		}
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mount_unix.go b/vendor/github.com/containerd/containerd/mount/mount_unix.go
new file mode 100644
index 0000000..23467a8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mount_unix.go
@@ -0,0 +1,21 @@
+// +build darwin freebsd
+
+package mount
+
+import "github.com/pkg/errors"
+
+var (
+	ErrNotImplementOnUnix = errors.New("not implemented under unix")
+)
+
+func (m *Mount) Mount(target string) error {
+	return ErrNotImplementOnUnix
+}
+
+func Unmount(mount string, flags int) error {
+	return ErrNotImplementOnUnix
+}
+
+func UnmountAll(mount string, flags int) error {
+	return ErrNotImplementOnUnix
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mount_windows.go b/vendor/github.com/containerd/containerd/mount/mount_windows.go
new file mode 100644
index 0000000..8eeca68
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mount_windows.go
@@ -0,0 +1,19 @@
+package mount
+
+import "github.com/pkg/errors"
+
+var (
+	ErrNotImplementOnWindows = errors.New("not implemented under windows")
+)
+
+func (m *Mount) Mount(target string) error {
+	return ErrNotImplementOnWindows
+}
+
+func Unmount(mount string, flags int) error {
+	return ErrNotImplementOnWindows
+}
+
+func UnmountAll(mount string, flags int) error {
+	return ErrNotImplementOnWindows
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo.go b/vendor/github.com/containerd/containerd/mount/mountinfo.go
new file mode 100644
index 0000000..2192cce
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mountinfo.go
@@ -0,0 +1,40 @@
+package mount
+
+// Info reveals information about a particular mounted filesystem. This
+// struct is populated from the content in the /proc/<pid>/mountinfo file.
+type Info struct {
+	// ID is a unique identifier of the mount (may be reused after umount).
+	ID int
+
+	// Parent indicates the ID of the mount parent (or of self for the top of the
+	// mount tree).
+	Parent int
+
+	// Major indicates one half of the device ID which identifies the device class.
+	Major int
+
+	// Minor indicates one half of the device ID which identifies a specific
+	// instance of device.
+	Minor int
+
+	// Root of the mount within the filesystem.
+	Root string
+
+	// Mountpoint indicates the mount point relative to the process's root.
+	Mountpoint string
+
+	// Options represents mount-specific options.
+	Options string
+
+	// Optional represents optional fields.
+	Optional string
+
+	// FSType indicates the type of filesystem, such as EXT3.
+	FSType string
+
+	// Source indicates filesystem specific information or "none".
+	Source string
+
+	// VFSOptions represents per super block options.
+	VFSOptions string
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_freebsd.go b/vendor/github.com/containerd/containerd/mount/mountinfo_freebsd.go
new file mode 100644
index 0000000..d2bc075
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mountinfo_freebsd.go
@@ -0,0 +1,45 @@
+package mount
+
+/*
+#include <sys/param.h>
+#include <sys/ucred.h>
+#include <sys/mount.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"reflect"
+	"unsafe"
+)
+
+// Self retrieves a list of mounts for the current running process.
+func Self() ([]Info, error) {
+	var rawEntries *C.struct_statfs
+
+	count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
+	if count == 0 {
+		return nil, fmt.Errorf("Failed to call getmntinfo")
+	}
+
+	var entries []C.struct_statfs
+	header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
+	header.Cap = count
+	header.Len = count
+	header.Data = uintptr(unsafe.Pointer(rawEntries))
+
+	var out []Info
+	for _, entry := range entries {
+		var mountinfo Info
+		mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
+		mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
+		mountinfo.FSType = C.GoString(&entry.f_fstypename[0])
+		out = append(out, mountinfo)
+	}
+	return out, nil
+}
+
+// PID collects the mounts for a specific process ID.
+func PID(pid int) ([]Info, error) {
+	return nil, fmt.Errorf("mountinfo.PID is not implemented on freebsd")
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go b/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go
new file mode 100644
index 0000000..0237722
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mountinfo_linux.go
@@ -0,0 +1,94 @@
+// +build linux
+
+package mount
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+const (
+	/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
+	   (1)(2)(3)   (4)   (5)      (6)      (7)   (8) (9)   (10)         (11)
+
+	   (1) mount ID:  unique identifier of the mount (may be reused after umount)
+	   (2) parent ID:  ID of parent (or of self for the top of the mount tree)
+	   (3) major:minor:  value of st_dev for files on filesystem
+	   (4) root:  root of the mount within the filesystem
+	   (5) mount point:  mount point relative to the process's root
+	   (6) mount options:  per mount options
+	   (7) optional fields:  zero or more fields of the form "tag[:value]"
+	   (8) separator:  marks the end of the optional fields
+	   (9) filesystem type:  name of filesystem of the form "type[.subtype]"
+	   (10) mount source:  filesystem specific information or "none"
+	   (11) super options:  per super block options*/
+	mountinfoFormat = "%d %d %d:%d %s %s %s %s"
+)
+
+// Self retrieves a list of mounts for the current running process.
+func Self() ([]Info, error) {
+	f, err := os.Open("/proc/self/mountinfo")
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseInfoFile(f)
+}
+
+func parseInfoFile(r io.Reader) ([]Info, error) {
+	var (
+		s   = bufio.NewScanner(r)
+		out = []Info{}
+	)
+
+	for s.Scan() {
+		if err := s.Err(); err != nil {
+			return nil, err
+		}
+
+		var (
+			p              = Info{}
+			text           = s.Text()
+			optionalFields string
+		)
+
+		if _, err := fmt.Sscanf(text, mountinfoFormat,
+			&p.ID, &p.Parent, &p.Major, &p.Minor,
+			&p.Root, &p.Mountpoint, &p.Options, &optionalFields); err != nil {
+			return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
+		}
+		// Safe as mountinfo encodes mountpoints with spaces as \040.
+		index := strings.Index(text, " - ")
+		postSeparatorFields := strings.Fields(text[index+3:])
+		if len(postSeparatorFields) < 3 {
+			return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
+		}
+
+		if optionalFields != "-" {
+			p.Optional = optionalFields
+		}
+
+		p.FSType = postSeparatorFields[0]
+		p.Source = postSeparatorFields[1]
+		p.VFSOptions = strings.Join(postSeparatorFields[2:], " ")
+		out = append(out, p)
+	}
+	return out, nil
+}
+
+// PID collects the mounts for a specific process ID. If the process
+// ID is unknown, it is better to use `Self` which will inspect
+// "/proc/self/mountinfo" instead.
+func PID(pid int) ([]Info, error) {
+	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	return parseInfoFile(f)
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_solaris.go b/vendor/github.com/containerd/containerd/mount/mountinfo_solaris.go
new file mode 100644
index 0000000..aaafad3
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mountinfo_solaris.go
@@ -0,0 +1,50 @@
+// +build solaris,cgo
+
+package mount
+
+/*
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mnttab.h>
+*/
+import "C"
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+// Self retrieves a list of mounts for the current running process.
+func Self() ([]Info, error) {
+	path := C.CString(C.MNTTAB)
+	defer C.free(unsafe.Pointer(path))
+	mode := C.CString("r")
+	defer C.free(unsafe.Pointer(mode))
+
+	mnttab := C.fopen(path, mode)
+	if mnttab == nil {
+		return nil, fmt.Errorf("Failed to open %s", C.MNTTAB)
+	}
+
+	var out []Info
+	var mp C.struct_mnttab
+
+	ret := C.getmntent(mnttab, &mp)
+	for ret == 0 {
+		var mountinfo Info
+		mountinfo.Mountpoint = C.GoString(mp.mnt_mountp)
+		mountinfo.Source = C.GoString(mp.mnt_special)
+		mountinfo.FSType = C.GoString(mp.mnt_fstype)
+		mountinfo.Options = C.GoString(mp.mnt_mntopts)
+		out = append(out, mountinfo)
+		ret = C.getmntent(mnttab, &mp)
+	}
+
+	C.fclose(mnttab)
+	return out, nil
+}
+
+// PID collects the mounts for a specific process ID.
+func PID(pid int) ([]Info, error) {
+	return nil, fmt.Errorf("mountinfo.PID is not implemented on solaris")
+}
diff --git a/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go b/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go
new file mode 100644
index 0000000..aa659d8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/mount/mountinfo_unsupported.go
@@ -0,0 +1,18 @@
+// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo
+
+package mount
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// Self retrieves a list of mounts for the current running process.
+func Self() ([]Info, error) {
+	return nil, fmt.Errorf("mountinfo.Self is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// PID collects the mounts for a specific process ID.
+func PID(pid int) ([]Info, error) {
+	return nil, fmt.Errorf("mountinfo.PID is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/github.com/containerd/containerd/namespaces/context.go b/vendor/github.com/containerd/containerd/namespaces/context.go
new file mode 100644
index 0000000..7087114
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/namespaces/context.go
@@ -0,0 +1,63 @@
+package namespaces
+
+import (
+	"os"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+const (
+	// NamespaceEnvVar is the environment variable key name
+	NamespaceEnvVar = "CONTAINERD_NAMESPACE"
+	// Default is the name of the default namespace
+	Default = "default"
+)
+
+type namespaceKey struct{}
+
+// WithNamespace sets a given namespace on the context
+func WithNamespace(ctx context.Context, namespace string) context.Context {
+	ctx = context.WithValue(ctx, namespaceKey{}, namespace) // set our key for namespace
+
+	// also store on the grpc headers so it gets picked up by any clients that
+	// are using this.
+	return withGRPCNamespaceHeader(ctx, namespace)
+}
+
+// NamespaceFromEnv uses the namespace defined in CONTAINERD_NAMESPACE or
+// default
+func NamespaceFromEnv(ctx context.Context) context.Context {
+	namespace := os.Getenv(NamespaceEnvVar)
+	if namespace == "" {
+		namespace = Default
+	}
+	return WithNamespace(ctx, namespace)
+}
+
+// Namespace returns the namespace from the context.
+//
+// The namespace is not guaranteed to be valid.
+func Namespace(ctx context.Context) (string, bool) {
+	namespace, ok := ctx.Value(namespaceKey{}).(string)
+	if !ok {
+		return fromGRPCHeader(ctx)
+	}
+
+	return namespace, ok
+}
+
+// NamespaceRequired returns the valid namespace from the context or an error.
+func NamespaceRequired(ctx context.Context) (string, error) {
+	namespace, ok := Namespace(ctx)
+	if !ok || namespace == "" {
+		return "", errors.Wrapf(errdefs.ErrFailedPrecondition, "namespace is required")
+	}
+
+	if err := Validate(namespace); err != nil {
+		return "", errors.Wrap(err, "namespace validation")
+	}
+
+	return namespace, nil
+}
diff --git a/vendor/github.com/containerd/containerd/namespaces/grpc.go b/vendor/github.com/containerd/containerd/namespaces/grpc.go
new file mode 100644
index 0000000..c18fc93
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/namespaces/grpc.go
@@ -0,0 +1,44 @@
+package namespaces
+
+import (
+	"golang.org/x/net/context"
+	"google.golang.org/grpc/metadata"
+)
+
+const (
+	// GRPCHeader defines the header name for specifying a containerd namespace.
+	GRPCHeader = "containerd-namespace"
+)
+
+// NOTE(stevvooe): We can stub this file out if we don't want a grpc dependency here.
+
+func withGRPCNamespaceHeader(ctx context.Context, namespace string) context.Context {
+	// also store on the grpc headers so it gets picked up by any clients that
+	// are using this.
+	nsheader := metadata.Pairs(GRPCHeader, namespace)
+	md, ok := metadata.FromOutgoingContext(ctx) // merge with outgoing context.
+	if !ok {
+		md = nsheader
+	} else {
+		// order ensures the latest is first in this list.
+		md = metadata.Join(nsheader, md)
+	}
+
+	return metadata.NewOutgoingContext(ctx, md)
+}
+
+func fromGRPCHeader(ctx context.Context) (string, bool) {
+	// try to extract for use in grpc servers.
+	md, ok := metadata.FromIncomingContext(ctx)
+	if !ok {
+		// TODO(stevvooe): Check outgoing context?
+		return "", false
+	}
+
+	values := md[GRPCHeader]
+	if len(values) == 0 {
+		return "", false
+	}
+
+	return values[0], true
+}
diff --git a/vendor/github.com/containerd/containerd/namespaces/store.go b/vendor/github.com/containerd/containerd/namespaces/store.go
new file mode 100644
index 0000000..68ff714
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/namespaces/store.go
@@ -0,0 +1,21 @@
+package namespaces
+
+import "context"
+
+// Store provides introspection about namespaces.
+//
+// Note that these are slightly different than other objects, which are record
+// oriented. A namespace is really just a name and a set of labels. Objects
+// that belong to a namespace are returned when the namespace is assigned to a
+// given context.
+//
+//
+type Store interface {
+	Create(ctx context.Context, namespace string, labels map[string]string) error
+	Labels(ctx context.Context, namespace string) (map[string]string, error)
+	SetLabel(ctx context.Context, namespace, key, value string) error
+	List(ctx context.Context) ([]string, error)
+
+	// Delete removes the namespace. The namespace must be empty to be deleted.
+	Delete(ctx context.Context, namespace string) error
+}
diff --git a/vendor/github.com/containerd/containerd/namespaces/validate.go b/vendor/github.com/containerd/containerd/namespaces/validate.go
new file mode 100644
index 0000000..ff97a8c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/namespaces/validate.go
@@ -0,0 +1,67 @@
+// Package namespaces provides tools for working with namespaces across
+// containerd.
+//
+// Namespaces collect resources such as containers and images, into a unique
+// identifier space. This means that two applications can use the same
+// identifiers and not conflict while using containerd.
+//
+// This package can be used to ensure that client and server functions
+// correctly store the namespace on the context.
+package namespaces
+
+import (
+	"regexp"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+)
+
+const (
+	maxLength = 76
+	alpha     = `[A-Za-z]`
+	alphanum  = `[A-Za-z0-9]+`
+	label     = alpha + alphanum + `(:?[-]+` + alpha + alphanum + `)*`
+)
+
+var (
+	// namespaceRe validates that a namespace matches valid identifiers.
+	//
+	// Rules for domains, defined in RFC 1035, section 2.3.1, are used for
+	// namespaces.
+	namespaceRe = regexp.MustCompile(reAnchor(label + reGroup("[.]"+reGroup(label)) + "*"))
+)
+
+// Validate returns nil if the string s is a valid namespace.
+//
+// To allow such namespace identifiers to be used across various contexts
+// safely, the character set has been restricted to that defined for domains in
+// RFC 1035, section 2.3.1. This will make namespace identifiers safe for use
+// across networks, filesystems and other media.
+//
+// The identifier specification departs from RFC 1035 in that it allows
+// "labels" to start with number and only enforces a total length restriction
+// of 76 characters.
+//
+// While the character set may be expanded in the future, namespace identifiers
+// are guaranteed to be safely used as filesystem path components.
+//
+// For the most part, this doesn't need to be called directly when using the
+// context-oriented functions.
+func Validate(s string) error {
+	if len(s) > maxLength {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "namespace %q greater than maximum length (%d characters)", s, maxLength)
+	}
+
+	if !namespaceRe.MatchString(s) {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "namespace %q must match %v", s, namespaceRe)
+	}
+	return nil
+}
+
+func reGroup(s string) string {
+	return `(?:` + s + `)`
+}
+
+func reAnchor(s string) string {
+	return `^` + s + `$`
+}
diff --git a/vendor/github.com/containerd/containerd/osutils/fds.go b/vendor/github.com/containerd/containerd/osutils/fds.go
deleted file mode 100644
index 98fc930..0000000
--- a/vendor/github.com/containerd/containerd/osutils/fds.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !windows,!darwin
-
-package osutils
-
-import (
-	"io/ioutil"
-	"path/filepath"
-	"strconv"
-)
-
-// GetOpenFds returns the number of open fds for the process provided by pid
-func GetOpenFds(pid int) (int, error) {
-	dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd"))
-	if err != nil {
-		return -1, err
-	}
-	return len(dirs), nil
-}
diff --git a/vendor/github.com/containerd/containerd/osutils/pdeathsig_linux.go b/vendor/github.com/containerd/containerd/osutils/pdeathsig_linux.go
deleted file mode 100644
index 5310f2e..0000000
--- a/vendor/github.com/containerd/containerd/osutils/pdeathsig_linux.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// +build !solaris
-
-package osutils
-
-import (
-	"syscall"
-)
-
-// SetPDeathSig sets the parent death signal to SIGKILL so that if the
-// shim dies the container process also dies.
-func SetPDeathSig() *syscall.SysProcAttr {
-	return &syscall.SysProcAttr{
-		Pdeathsig: syscall.SIGKILL,
-	}
-}
diff --git a/vendor/github.com/containerd/containerd/osutils/pdeathsig_solaris.go b/vendor/github.com/containerd/containerd/osutils/pdeathsig_solaris.go
deleted file mode 100644
index 512e24b..0000000
--- a/vendor/github.com/containerd/containerd/osutils/pdeathsig_solaris.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build solaris
-
-package osutils
-
-// SetPDeathSig is a no-op on Solaris as Pdeathsig is not defined.
-func SetPDeathSig() *syscall.SysProcAttr {
-	return nil
-}
diff --git a/vendor/github.com/containerd/containerd/osutils/prctl.go b/vendor/github.com/containerd/containerd/osutils/prctl.go
deleted file mode 100644
index 1d6e251..0000000
--- a/vendor/github.com/containerd/containerd/osutils/prctl.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// +build linux
-
-// Package osutils provide access to the Get Child and Set Child prctl
-// flags.
-// See http://man7.org/linux/man-pages/man2/prctl.2.html
-package osutils
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-// PR_SET_CHILD_SUBREAPER allows setting the child subreaper.
-// If arg2 is nonzero, set the "child subreaper" attribute of the
-// calling process; if arg2 is zero, unset the attribute.  When a
-// process is marked as a child subreaper, all of the children
-// that it creates, and their descendants, will be marked as
-// having a subreaper.  In effect, a subreaper fulfills the role
-// of init(1) for its descendant processes.  Upon termination of
-// a process that is orphaned (i.e., its immediate parent has
-// already terminated) and marked as having a subreaper, the
-// nearest still living ancestor subreaper will receive a SIGCHLD
-// signal and be able to wait(2) on the process to discover its
-// termination status.
-const prSetChildSubreaper = 36
-
-// PR_GET_CHILD_SUBREAPER allows retrieving the current child
-// subreaper.
-// Return the "child subreaper" setting of the caller, in the
-// location pointed to by (int *) arg2.
-const prGetChildSubreaper = 37
-
-// GetSubreaper returns the subreaper setting for the calling process
-func GetSubreaper() (int, error) {
-	var i uintptr
-	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, prGetChildSubreaper, uintptr(unsafe.Pointer(&i)), 0); err != 0 {
-		return -1, err
-	}
-	return int(i), nil
-}
-
-// SetSubreaper sets the value i as the subreaper setting for the calling process
-func SetSubreaper(i int) error {
-	if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, prSetChildSubreaper, uintptr(i), 0); err != 0 {
-		return err
-	}
-	return nil
-}
diff --git a/vendor/github.com/containerd/containerd/osutils/prctl_solaris.go b/vendor/github.com/containerd/containerd/osutils/prctl_solaris.go
deleted file mode 100644
index 84da5f9..0000000
--- a/vendor/github.com/containerd/containerd/osutils/prctl_solaris.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build solaris
-
-package osutils
-
-import (
-	"errors"
-)
-
-//Solaris TODO
-
-// GetSubreaper returns the subreaper setting for the calling process
-func GetSubreaper() (int, error) {
-	return 0, errors.New("osutils GetSubreaper not implemented on Solaris")
-}
-
-// SetSubreaper sets the value i as the subreaper setting for the calling process
-func SetSubreaper(i int) error {
-	return nil
-}
diff --git a/vendor/github.com/containerd/containerd/osutils/reaper.go b/vendor/github.com/containerd/containerd/osutils/reaper.go
deleted file mode 100644
index 6a80335..0000000
--- a/vendor/github.com/containerd/containerd/osutils/reaper.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build !windows
-
-package osutils
-
-import "syscall"
-
-// Exit is the wait4 information from an exited process
-type Exit struct {
-	Pid    int
-	Status int
-}
-
-// Reap reaps all child processes for the calling process and returns their
-// exit information
-func Reap(wait bool) (exits []Exit, err error) {
-	var (
-		ws  syscall.WaitStatus
-		rus syscall.Rusage
-	)
-	flag := syscall.WNOHANG
-	if wait {
-		flag = 0
-	}
-	for {
-		pid, err := syscall.Wait4(-1, &ws, flag, &rus)
-		if err != nil {
-			if err == syscall.ECHILD {
-				return exits, nil
-			}
-			return exits, err
-		}
-		if pid <= 0 {
-			return exits, nil
-		}
-		exits = append(exits, Exit{
-			Pid:    pid,
-			Status: exitStatus(ws),
-		})
-	}
-}
-
-const exitSignalOffset = 128
-
-// exitStatus returns the correct exit status for a process based on if it
-// was signaled or exited cleanly
-func exitStatus(status syscall.WaitStatus) int {
-	if status.Signaled() {
-		return exitSignalOffset + int(status.Signal())
-	}
-	return status.ExitStatus()
-}
diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go
new file mode 100644
index 0000000..bd66e25
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/database.go
@@ -0,0 +1,77 @@
+package platforms
+
+import (
+	"runtime"
+	"strings"
+)
+
+// These functions are generated from https://golang.org/src/go/build/syslist.go.
+//
+// We use switch statements because they are slightly faster than map lookups
+// and use a little less memory.
+
+// isKnownOS returns true if we know about the operating system.
+//
+// The OS value should be normalized before calling this function.
+func isKnownOS(os string) bool {
+	switch os {
+	case "android", "darwin", "dragonfly", "freebsd", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
+		return true
+	}
+	return false
+}
+
+// isKnownArch returns true if we know about the architecture.
+//
+// The arch value should be normalized before being passed to this function.
+func isKnownArch(arch string) bool {
+	switch arch {
+	case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "s390", "s390x", "sparc", "sparc64":
+		return true
+	}
+	return false
+}
+
+func normalizeOS(os string) string {
+	if os == "" {
+		return runtime.GOOS
+	}
+	os = strings.ToLower(os)
+
+	switch os {
+	case "macos":
+		os = "darwin"
+	}
+	return os
+}
+
+// normalizeArch normalizes the architecture.
+func normalizeArch(arch, variant string) (string, string) {
+	arch, variant = strings.ToLower(arch), strings.ToLower(variant)
+	switch arch {
+	case "i386":
+		arch = "386"
+		variant = ""
+	case "x86_64", "x86-64":
+		arch = "amd64"
+		variant = ""
+	case "aarch64":
+		arch = "arm64"
+		variant = "" // v8 is implied
+	case "armhf":
+		arch = "arm"
+		variant = ""
+	case "armel":
+		arch = "arm"
+		variant = "v6"
+	case "arm":
+		switch variant {
+		case "v7", "7":
+			variant = "v7"
+		case "5", "6", "8":
+			variant = "v" + variant
+		}
+	}
+
+	return arch, variant
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go
new file mode 100644
index 0000000..2b57b49
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/defaults.go
@@ -0,0 +1,21 @@
+package platforms
+
+import (
+	"runtime"
+
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Default returns the default specifier for the platform.
+func Default() string {
+	return Format(DefaultSpec())
+}
+
+// DefaultSpec returns the current platform's default platform specification.
+func DefaultSpec() specs.Platform {
+	return specs.Platform{
+		OS:           runtime.GOOS,
+		Architecture: runtime.GOARCH,
+		// TODO(stevvooe): Need to resolve GOARM for arm hosts.
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go
new file mode 100644
index 0000000..56c6ddc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/platforms/platforms.go
@@ -0,0 +1,236 @@
+// Package platforms provides a toolkit for normalizing, matching and
+// specifying container platforms.
+//
+// Centered around OCI platform specifications, we define a string-based
+// specifier syntax that can be used for user input. With a specifier, users
+// only need to specify the parts of the platform that are relevant to their
+// context, providing an operating system or architecture or both.
+//
+// How do I use this package?
+//
+// The vast majority of use cases should simply use the match function with
+// user input. The first step is to parse a specifier into a matcher:
+//
+//   m, err := Parse("linux")
+//   if err != nil { ... }
+//
+// Once you have a matcher, use it to match against the platform declared by a
+// component, typically from an image or runtime. Since extracting an image's
+// platform is a little more involved, we'll use an example against the
+// platform default:
+//
+//   if ok := m.Match(Default()); !ok { /* doesn't match */ }
+//
+// This can be composed in loops for resolving runtimes or used as a filter for
+// fetch and select images.
+//
+// More details of the specifier syntax and platform spec follow.
+//
+// Declaring Platform Support
+//
+// Components that have strict platform requirements should use the OCI
+// platform specification to declare their support. Typically, this will be
+// images and runtimes that should declare which platform they
+// support specifically. This looks roughly as follows:
+//
+//   type Platform struct {
+//	   Architecture string
+//	   OS           string
+//	   Variant      string
+//   }
+//
+// Most images and runtimes should at least set Architecture and OS, according
+// to their GOARCH and GOOS values, respectively (follow the OCI image
+// specification when in doubt). ARM should set variant under certain
+// discussions, which are outlined below.
+//
+// Platform Specifiers
+//
+// While the OCI platform specifications provide a tool for components to
+// specify structured information, user input typically doesn't need the full
+// context and much can be inferred. To solve this problem, we introduced
+// "specifiers". A specifier has the format
+// `<os>|<arch>|<os>/<arch>[/<variant>]`.  The user can provide either the
+// operating system or the architecture or both.
+//
+// An example of a common specifier is `linux/amd64`. If the host has a default
+// runtime that matches this, the user can simply provide the component that
+// matters. For example, if an image provides amd64 and arm64 support, the
+// operating system, `linux` can be inferred, so they only have to provide
+// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
+// where the architecture may be known but a runtime may support images from
+// different operating systems.
+//
+// Normalization
+//
+// Because not all users are familiar with the way the Go runtime represents
+// platforms, several normalizations have been provided to make this package
+// easier to use.
+//
+// The following are performed for architectures:
+//
+//   Value    Normalized
+//   aarch64  arm64
+//   armhf    arm
+//   armel    arm/v6
+//   i386     386
+//   x86_64   amd64
+//   x86-64   amd64
+//
+// We also normalize the operating system `macos` to `darwin`.
+//
+// ARM Support
+//
+// To qualify ARM architecture, the Variant field is used to qualify the arm
+// version. The most common arm version, v7, is represented without the variant
+// unless it is explicitly provided. This is treated as equivalent to armhf. A
+// previous architecture, armel, will be normalized to arm/v6.
+//
+// While these normalizations are provided, their support on arm platforms has
+// not yet been fully implemented and tested.
+package platforms
+
+import (
+	"regexp"
+	"runtime"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+var (
+	specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
+)
+
+// Matcher matches platforms specifications, provided by an image or runtime.
+type Matcher interface {
+	Spec() specs.Platform
+	Match(platform specs.Platform) bool
+}
+
+type matcher struct {
+	specs.Platform
+}
+
+func (m *matcher) Spec() specs.Platform {
+	return m.Platform
+}
+
+func (m *matcher) Match(platform specs.Platform) bool {
+	normalized := Normalize(platform)
+	return m.OS == normalized.OS &&
+		m.Architecture == normalized.Architecture &&
+		m.Variant == normalized.Variant
+}
+
+func (m *matcher) String() string {
+	return Format(m.Platform)
+}
+
+// Parse parses the platform specifier syntax into a platform declaration.
+//
+// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
+// The minimum required information for a platform specifier is the operating
+// system or architecture. If there is only a single string (no slashes), the
+// value will be matched against the known set of operating systems, then fall
+// back to the known set of architectures. The missing component will be
+// inferred based on the local environment.
+//
+// Applications should opt to use `Match` over directly parsing specifiers.
+func Parse(specifier string) (Matcher, error) {
+	if strings.Contains(specifier, "*") {
+		// TODO(stevvooe): need to work out exact wildcard handling
+		return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
+	}
+
+	parts := strings.Split(specifier, "/")
+
+	for _, part := range parts {
+		if !specifierRe.MatchString(part) {
+			return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
+		}
+	}
+
+	var p specs.Platform
+	switch len(parts) {
+	case 1:
+		// in this case, we will test that the value might be an OS, then look
+		// it up. If it is not known, we'll treat it as an architecture. Since
+		// we have very little information about the platform here, we are
+		// going to be a little more strict if we don't know about the argument
+		// value.
+		p.OS = normalizeOS(parts[0])
+		if isKnownOS(p.OS) {
+			// picks a default architecture
+			p.Architecture = runtime.GOARCH
+			if p.Architecture == "arm" {
+				// TODO(stevvooe): Resolve arm variant, if not v6 (default)
+				return nil, errors.Wrapf(errdefs.ErrNotImplemented, "arm support not fully implemented")
+			}
+
+			return &matcher{p}, nil
+		}
+
+		p.Architecture, p.Variant = normalizeArch(parts[0], "")
+		if isKnownArch(p.Architecture) {
+			p.OS = runtime.GOOS
+			return &matcher{p}, nil
+		}
+
+		return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
+	case 2:
+		// In this case, we treat as a regular os/arch pair. We don't care
+		// about whether or not we know of the platform.
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], "")
+
+		return &matcher{p}, nil
+	case 3:
+		// we have a fully specified variant, this is rare
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
+
+		return &matcher{p}, nil
+	}
+
+	return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
+}
+
+// Format returns a string specifier from the provided platform specification.
+func Format(platform specs.Platform) string {
+	if platform.OS == "" {
+		return "unknown"
+	}
+
+	return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
+}
+
+func joinNotEmpty(s ...string) string {
+	var ss []string
+	for _, s := range s {
+		if s == "" {
+			continue
+		}
+
+		ss = append(ss, s)
+	}
+
+	return strings.Join(ss, "/")
+}
+
+// Normalize validates and translates the platform to the canonical value.
+//
+// For example, if "Aarch64" is encountered, we change it to "arm64" or if
+// "x86_64" is encountered, it becomes "amd64".
+func Normalize(platform specs.Platform) specs.Platform {
+	platform.OS = normalizeOS(platform.OS)
+	platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
+
+	// these fields are deprecated, remove them
+	platform.OSFeatures = nil
+	platform.OSVersion = ""
+
+	return platform
+}
diff --git a/vendor/github.com/containerd/containerd/plugin/context.go b/vendor/github.com/containerd/containerd/plugin/context.go
new file mode 100644
index 0000000..7fff5c6
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/plugin/context.go
@@ -0,0 +1,124 @@
+package plugin
+
+import (
+	"context"
+	"path/filepath"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/events"
+	"github.com/containerd/containerd/log"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+// InitContext is used for plugin initialization
+type InitContext struct {
+	Context context.Context
+	Root    string
+	State   string
+	Config  interface{}
+	Address string
+	Events  *events.Exchange
+
+	Meta *Meta // plugins can fill in metadata at init.
+
+	plugins *PluginSet
+}
+
+// NewContext returns a new plugin InitContext
+func NewContext(ctx context.Context, r *Registration, plugins *PluginSet, root, state string) *InitContext {
+	return &InitContext{
+		Context: log.WithModule(ctx, r.URI()),
+		Root:    filepath.Join(root, r.URI()),
+		State:   filepath.Join(state, r.URI()),
+		Meta: &Meta{
+			Exports: map[string]string{},
+		},
+		plugins: plugins,
+	}
+}
+
+// Get returns the first plugin by its type
+func (i *InitContext) Get(t Type) (interface{}, error) {
+	return i.plugins.Get(t)
+}
+
+// Meta contains information gathered from the registration and initialization
+// process.
+type Meta struct {
+	Platforms    []ocispec.Platform // platforms supported by plugin
+	Exports      map[string]string  // values exported by plugin
+	Capabilities []string           // feature switches for plugin
+}
+
+// Plugin represents an initialized plugin, used with an init context.
+type Plugin struct {
+	Registration *Registration // registration, as initialized
+	Config       interface{}   // config, as initialized
+	Meta         *Meta
+
+	instance interface{}
+	err      error // will be set if there was an error initializing the plugin
+}
+
+func (p *Plugin) Err() error {
+	return p.err
+}
+
+func (p *Plugin) Instance() (interface{}, error) {
+	return p.instance, p.err
+}
+
+// PluginSet defines a plugin collection, used with InitContext.
+//
+// This maintains ordering and unique indexing over the set.
+//
+// After iteratively instantiating plugins, this set should represent the
+// ordered initialization set of plugins for a containerd instance.
+type PluginSet struct {
+	ordered     []*Plugin // order of initialization
+	byTypeAndID map[Type]map[string]*Plugin
+}
+
+func NewPluginSet() *PluginSet {
+	return &PluginSet{
+		byTypeAndID: make(map[Type]map[string]*Plugin),
+	}
+}
+
+func (ps *PluginSet) Add(p *Plugin) error {
+	if byID, typeok := ps.byTypeAndID[p.Registration.Type]; !typeok {
+		ps.byTypeAndID[p.Registration.Type] = map[string]*Plugin{
+			p.Registration.ID: p,
+		}
+	} else if _, idok := byID[p.Registration.ID]; !idok {
+		byID[p.Registration.ID] = p
+	} else {
+		return errors.Wrapf(errdefs.ErrAlreadyExists, "plugin %v already initialized", p.Registration.URI())
+	}
+
+	ps.ordered = append(ps.ordered, p)
+	return nil
+}
+
+// Get returns the first plugin by its type
+func (ps *PluginSet) Get(t Type) (interface{}, error) {
+	for _, v := range ps.byTypeAndID[t] {
+		return v.Instance()
+	}
+	return nil, errors.Wrapf(errdefs.ErrNotFound, "no plugins registered for %s", t)
+}
+
+func (i *InitContext) GetAll() []*Plugin {
+	return i.plugins.ordered
+}
+
+// GetByType returns all plugins with the specific type.
+func (i *InitContext) GetByType(t Type) (map[string]*Plugin, error) {
+	p, ok := i.plugins.byTypeAndID[t]
+	if !ok {
+		return nil, errors.Wrapf(errdefs.ErrNotFound, "no plugins registered for %s", t)
+	}
+
+	return p, nil
+}
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin.go b/vendor/github.com/containerd/containerd/plugin/plugin.go
new file mode 100644
index 0000000..d7b1c0a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/plugin/plugin.go
@@ -0,0 +1,165 @@
+package plugin
+
+import (
+	"fmt"
+	"sync"
+
+	"github.com/pkg/errors"
+	"google.golang.org/grpc"
+)
+
+var (
+	// ErrNoType is returned when no type is specified
+	ErrNoType = errors.New("plugin: no type")
+	// ErrNoPluginID is returned when no id is specified
+	ErrNoPluginID = errors.New("plugin: no id")
+
+	// ErrSkipPlugin is used when a plugin is not initialized and should not be loaded,
+// this allows the plugin loader to differentiate between a plugin which is configured
+	// not to load and one that fails to load.
+	ErrSkipPlugin = errors.New("skip plugin")
+
+	// ErrInvalidRequires will be thrown if the requirements for a plugin are
+	// defined in an invalid manner.
+	ErrInvalidRequires = errors.New("invalid requires")
+)
+
+// IsSkipPlugin returns true if the error is skipping the plugin
+func IsSkipPlugin(err error) bool {
+	if errors.Cause(err) == ErrSkipPlugin {
+		return true
+	}
+	return false
+}
+
+// Type is the type of the plugin
+type Type string
+
+func (t Type) String() string { return string(t) }
+
+const (
+	// AllPlugins declares that the plugin should be initialized after all others.
+	AllPlugins Type = "*"
+	// RuntimePlugin implements a runtime
+	RuntimePlugin Type = "io.containerd.runtime.v1"
+	// GRPCPlugin implements a grpc service
+	GRPCPlugin Type = "io.containerd.grpc.v1"
+	// SnapshotPlugin implements a snapshotter
+	SnapshotPlugin Type = "io.containerd.snapshotter.v1"
+	// TaskMonitorPlugin implements a task monitor
+	TaskMonitorPlugin Type = "io.containerd.monitor.v1"
+	// DiffPlugin implements a differ
+	DiffPlugin Type = "io.containerd.differ.v1"
+	// MetadataPlugin implements a metadata store
+	MetadataPlugin Type = "io.containerd.metadata.v1"
+	// ContentPlugin implements a content store
+	ContentPlugin Type = "io.containerd.content.v1"
+)
+
+// Registration contains information for registering a plugin
+type Registration struct {
+	Type     Type
+	ID       string
+	Config   interface{}
+	Requires []Type
+
+	// InitFn is called when initializing a plugin. The registration and
+	// context are passed in. The init function may modify the registration to
+// add exports, capabilities and platform support declarations.
+	InitFn func(*InitContext) (interface{}, error)
+}
+
+func (r *Registration) Init(ic *InitContext) *Plugin {
+	p, err := r.InitFn(ic)
+	return &Plugin{
+		Registration: r,
+		Config:       ic.Config,
+		Meta:         ic.Meta,
+		instance:     p,
+		err:          err,
+	}
+}
+
+// URI returns the full plugin URI
+func (r *Registration) URI() string {
+	return fmt.Sprintf("%s.%s", r.Type, r.ID)
+}
+
+// Service allows GRPC services to be registered with the underlying server
+type Service interface {
+	Register(*grpc.Server) error
+}
+
+var register = struct {
+	sync.RWMutex
+	r []*Registration
+}{}
+
+// Load loads all plugins at the provided path into containerd
+func Load(path string) (err error) {
+	defer func() {
+		if v := recover(); v != nil {
+			rerr, ok := v.(error)
+			if !ok {
+				rerr = fmt.Errorf("%s", v)
+			}
+			err = rerr
+		}
+	}()
+	return loadPlugins(path)
+}
+
+// Register allows plugins to register
+func Register(r *Registration) {
+	register.Lock()
+	defer register.Unlock()
+	if r.Type == "" {
+		panic(ErrNoType)
+	}
+	if r.ID == "" {
+		panic(ErrNoPluginID)
+	}
+
+	var last bool
+	for _, requires := range r.Requires {
+		if requires == "*" {
+			last = true
+		}
+	}
+	if last && len(r.Requires) != 1 {
+		panic(ErrInvalidRequires)
+	}
+
+	register.r = append(register.r, r)
+}
+
+// Graph returns an ordered list of registered plugins for initialization
+func Graph() (ordered []*Registration) {
+	register.RLock()
+	defer register.RUnlock()
+
+	added := map[*Registration]bool{}
+	for _, r := range register.r {
+
+		children(r.ID, r.Requires, added, &ordered)
+		if !added[r] {
+			ordered = append(ordered, r)
+			added[r] = true
+		}
+	}
+	return ordered
+}
+
+func children(id string, types []Type, added map[*Registration]bool, ordered *[]*Registration) {
+	for _, t := range types {
+		for _, r := range register.r {
+			if r.ID != id && (t == "*" || r.Type == t) {
+				children(r.ID, r.Requires, added, ordered)
+				if !added[r] {
+					*ordered = append(*ordered, r)
+					added[r] = true
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_go18.go b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go
new file mode 100644
index 0000000..d910124
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/plugin/plugin_go18.go
@@ -0,0 +1,46 @@
+// +build go1.8,!windows,amd64
+
+package plugin
+
+import (
+	"fmt"
+	"path/filepath"
+	"plugin"
+	"runtime"
+)
+
+// loadPlugins loads all plugins for the OS and Arch
+// that containerd is built for inside the provided path
+func loadPlugins(path string) error {
+	abs, err := filepath.Abs(path)
+	if err != nil {
+		return err
+	}
+	pattern := filepath.Join(abs, fmt.Sprintf(
+		"*-%s-%s.%s",
+		runtime.GOOS,
+		runtime.GOARCH,
+		getLibExt(),
+	))
+	libs, err := filepath.Glob(pattern)
+	if err != nil {
+		return err
+	}
+	for _, lib := range libs {
+		if _, err := plugin.Open(lib); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// getLibExt returns a platform specific lib extension for
+// the platform that containerd is running on
+func getLibExt() string {
+	switch runtime.GOOS {
+	case "windows":
+		return "dll"
+	default:
+		return "so"
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/plugin/plugin_other.go b/vendor/github.com/containerd/containerd/plugin/plugin_other.go
new file mode 100644
index 0000000..21a4570
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/plugin/plugin_other.go
@@ -0,0 +1,8 @@
+// +build !go1.8 windows !amd64
+
+package plugin
+
+func loadPlugins(path string) error {
+	// plugins not supported until 1.8
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/process.go b/vendor/github.com/containerd/containerd/process.go
new file mode 100644
index 0000000..e51367a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/process.go
@@ -0,0 +1,211 @@
+package containerd
+
+import (
+	"context"
+	"strings"
+	"syscall"
+	"time"
+
+	"github.com/containerd/containerd/api/services/tasks/v1"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+)
+
+// Process represents a system process
+type Process interface {
+	// Pid is the system specific process id
+	Pid() uint32
+	// Start starts the process executing the user's defined binary
+	Start(context.Context) error
+	// Delete removes the process and any resources allocated returning the exit status
+	Delete(context.Context, ...ProcessDeleteOpts) (*ExitStatus, error)
+	// Kill sends the provided signal to the process
+	Kill(context.Context, syscall.Signal, ...KillOpts) error
+	// Wait asynchronously waits for the process to exit, and sends the exit code to the returned channel
+	Wait(context.Context) (<-chan ExitStatus, error)
+	// CloseIO allows various pipes to be closed on the process
+	CloseIO(context.Context, ...IOCloserOpts) error
+	// Resize changes the width and height of the process's terminal
+	Resize(ctx context.Context, w, h uint32) error
+	// IO returns the io set for the process
+	IO() IO
+	// Status returns the executing status of the process
+	Status(context.Context) (Status, error)
+}
+
+// ExitStatus encapsulates a process' exit status.
+// It is used by `Wait()` to return either a process exit code or an error
+type ExitStatus struct {
+	code     uint32
+	exitedAt time.Time
+	err      error
+}
+
+// Result returns the exit code and time of the exit status.
+// An error may be returned here to which indicates there was an error
+//   at some point while waiting for the exit status. It does not signify
+//   an error with the process itself.
+// If an error is returned, the process may still be running.
+func (s ExitStatus) Result() (uint32, time.Time, error) {
+	return s.code, s.exitedAt, s.err
+}
+
+// ExitCode returns the exit code of the process.
+// This is only valid if Error() returns nil
+func (s ExitStatus) ExitCode() uint32 {
+	return s.code
+}
+
+// ExitTime returns the exit time of the process
+// This is only valid if Error() returns nil
+func (s ExitStatus) ExitTime() time.Time {
+	return s.exitedAt
+}
+
+// Error returns the error, if any, that occurred while waiting for the
+// process.
+func (s ExitStatus) Error() error {
+	return s.err
+}
+
+type process struct {
+	id   string
+	task *task
+	pid  uint32
+	io   IO
+}
+
+func (p *process) ID() string {
+	return p.id
+}
+
+// Pid returns the pid of the process
+// The pid is not set until start is called and returns
+func (p *process) Pid() uint32 {
+	return p.pid
+}
+
+// Start starts the exec process
+func (p *process) Start(ctx context.Context) error {
+	r, err := p.task.client.TaskService().Start(ctx, &tasks.StartRequest{
+		ContainerID: p.task.id,
+		ExecID:      p.id,
+	})
+	if err != nil {
+		p.io.Cancel()
+		p.io.Wait()
+		p.io.Close()
+		return errdefs.FromGRPC(err)
+	}
+	p.pid = r.Pid
+	return nil
+}
+
+func (p *process) Kill(ctx context.Context, s syscall.Signal, opts ...KillOpts) error {
+	var i KillInfo
+	for _, o := range opts {
+		if err := o(ctx, p, &i); err != nil {
+			return err
+		}
+	}
+	_, err := p.task.client.TaskService().Kill(ctx, &tasks.KillRequest{
+		Signal:      uint32(s),
+		ContainerID: p.task.id,
+		ExecID:      p.id,
+		All:         i.All,
+	})
+	return errdefs.FromGRPC(err)
+}
+
+func (p *process) Wait(ctx context.Context) (<-chan ExitStatus, error) {
+	c := make(chan ExitStatus, 1)
+	go func() {
+		defer close(c)
+		r, err := p.task.client.TaskService().Wait(ctx, &tasks.WaitRequest{
+			ContainerID: p.task.id,
+			ExecID:      p.id,
+		})
+		if err != nil {
+			c <- ExitStatus{
+				code: UnknownExitStatus,
+				err:  err,
+			}
+			return
+		}
+		c <- ExitStatus{
+			code:     r.ExitStatus,
+			exitedAt: r.ExitedAt,
+		}
+	}()
+	return c, nil
+}
+
+func (p *process) CloseIO(ctx context.Context, opts ...IOCloserOpts) error {
+	r := &tasks.CloseIORequest{
+		ContainerID: p.task.id,
+		ExecID:      p.id,
+	}
+	var i IOCloseInfo
+	for _, o := range opts {
+		o(&i)
+	}
+	r.Stdin = i.Stdin
+	_, err := p.task.client.TaskService().CloseIO(ctx, r)
+	return errdefs.FromGRPC(err)
+}
+
+func (p *process) IO() IO {
+	return p.io
+}
+
+func (p *process) Resize(ctx context.Context, w, h uint32) error {
+	_, err := p.task.client.TaskService().ResizePty(ctx, &tasks.ResizePtyRequest{
+		ContainerID: p.task.id,
+		Width:       w,
+		Height:      h,
+		ExecID:      p.id,
+	})
+	return errdefs.FromGRPC(err)
+}
+
+func (p *process) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitStatus, error) {
+	for _, o := range opts {
+		if err := o(ctx, p); err != nil {
+			return nil, err
+		}
+	}
+	status, err := p.Status(ctx)
+	if err != nil {
+		return nil, err
+	}
+	switch status.Status {
+	case Running, Paused, Pausing:
+		return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "process must be stopped before deletion")
+	}
+	r, err := p.task.client.TaskService().DeleteProcess(ctx, &tasks.DeleteProcessRequest{
+		ContainerID: p.task.id,
+		ExecID:      p.id,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	if p.io != nil {
+		p.io.Wait()
+		p.io.Close()
+	}
+	return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil
+}
+
+func (p *process) Status(ctx context.Context) (Status, error) {
+	r, err := p.task.client.TaskService().Get(ctx, &tasks.GetRequest{
+		ContainerID: p.task.id,
+		ExecID:      p.id,
+	})
+	if err != nil {
+		return Status{}, errdefs.FromGRPC(err)
+	}
+	return Status{
+		Status:     ProcessStatus(strings.ToLower(r.Process.Status.String())),
+		ExitStatus: r.Process.ExitStatus,
+	}, nil
+}
diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/README.md b/vendor/github.com/containerd/containerd/protobuf/google/rpc/README.md
new file mode 100644
index 0000000..b7f6bf0
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/README.md
@@ -0,0 +1,18 @@
+This package copies definitions used with GRPC to represent error conditions
+within GRPC data types. These files are licensed under the provisions outlined
+at the top of each file.
+
+## `containerd`
+
+This is moved from the [googleapis
+project](https://github.com/googleapis/googleapis/tree/master/google/rpc) to
+allow us to regenerate these types for use with gogoprotobuf. We can move this
+away if google can generate these sensibly.
+
+These files were imported from changes after
+7f47d894837ac1701ee555fd5c3d70e5d4a796b1. Updates should not be required.
+
+The other option is to get these into an upstream project, like gogoprotobuf.
+
+Note that the `go_package` option has been changed so that they generate
+correctly in a common package in the containerd project.
diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.pb.go b/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.pb.go
new file mode 100644
index 0000000..74537b7
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.pb.go
@@ -0,0 +1,270 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/protobuf/google/rpc/code.proto
+// DO NOT EDIT!
+
+/*
+Package rpc is a generated protocol buffer package.
+
+It is generated from these files:
+	github.com/containerd/containerd/protobuf/google/rpc/code.proto
+	github.com/containerd/containerd/protobuf/google/rpc/error_details.proto
+	github.com/containerd/containerd/protobuf/google/rpc/status.proto
+
+It has these top-level messages:
+	RetryInfo
+	DebugInfo
+	QuotaFailure
+	PreconditionFailure
+	BadRequest
+	RequestInfo
+	ResourceInfo
+	Help
+	LocalizedMessage
+	Status
+*/
+package rpc
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+// The canonical error codes for Google APIs.
+//
+//
+// Sometimes multiple error codes may apply.  Services should return
+// the most specific error code that applies.  For example, prefer
+// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
+// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
+type Code int32
+
+const (
+	// Not an error; returned on success
+	//
+	// HTTP Mapping: 200 OK
+	Code_OK Code = 0
+	// The operation was cancelled, typically by the caller.
+	//
+	// HTTP Mapping: 499 Client Closed Request
+	Code_CANCELLED Code = 1
+	// Unknown error.  For example, this error may be returned when
+	// a `Status` value received from another address space belongs to
+	// an error space that is not known in this address space.  Also
+	// errors raised by APIs that do not return enough error information
+	// may be converted to this error.
+	//
+	// HTTP Mapping: 500 Internal Server Error
+	Code_UNKNOWN Code = 2
+	// The client specified an invalid argument.  Note that this differs
+	// from `FAILED_PRECONDITION`.  `INVALID_ARGUMENT` indicates arguments
+	// that are problematic regardless of the state of the system
+	// (e.g., a malformed file name).
+	//
+	// HTTP Mapping: 400 Bad Request
+	Code_INVALID_ARGUMENT Code = 3
+	// The deadline expired before the operation could complete. For operations
+	// that change the state of the system, this error may be returned
+	// even if the operation has completed successfully.  For example, a
+	// successful response from a server could have been delayed long
+	// enough for the deadline to expire.
+	//
+	// HTTP Mapping: 504 Gateway Timeout
+	Code_DEADLINE_EXCEEDED Code = 4
+	// Some requested entity (e.g., file or directory) was not found.
+	//
+	// Note to server developers: if a request is denied for an entire class
+	// of users, such as gradual feature rollout or undocumented whitelist,
+	// `NOT_FOUND` may be used. If a request is denied for some users within
+	// a class of users, such as user-based access control, `PERMISSION_DENIED`
+	// must be used.
+	//
+	// HTTP Mapping: 404 Not Found
+	Code_NOT_FOUND Code = 5
+	// The entity that a client attempted to create (e.g., file or directory)
+	// already exists.
+	//
+	// HTTP Mapping: 409 Conflict
+	Code_ALREADY_EXISTS Code = 6
+	// The caller does not have permission to execute the specified
+	// operation. `PERMISSION_DENIED` must not be used for rejections
+	// caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
+	// instead for those errors). `PERMISSION_DENIED` must not be
+	// used if the caller can not be identified (use `UNAUTHENTICATED`
+	// instead for those errors). This error code does not imply the
+	// request is valid or the requested entity exists or satisfies
+	// other pre-conditions.
+	//
+	// HTTP Mapping: 403 Forbidden
+	Code_PERMISSION_DENIED Code = 7
+	// The request does not have valid authentication credentials for the
+	// operation.
+	//
+	// HTTP Mapping: 401 Unauthorized
+	Code_UNAUTHENTICATED Code = 16
+	// Some resource has been exhausted, perhaps a per-user quota, or
+	// perhaps the entire file system is out of space.
+	//
+	// HTTP Mapping: 429 Too Many Requests
+	Code_RESOURCE_EXHAUSTED Code = 8
+	// The operation was rejected because the system is not in a state
+	// required for the operation's execution.  For example, the directory
+	// to be deleted is non-empty, an rmdir operation is applied to
+	// a non-directory, etc.
+	//
+	// Service implementors can use the following guidelines to decide
+	// between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
+	//  (a) Use `UNAVAILABLE` if the client can retry just the failing call.
+	//  (b) Use `ABORTED` if the client should retry at a higher level
+	//      (e.g., when a client-specified test-and-set fails, indicating the
+	//      client should restart a read-modify-write sequence).
+	//  (c) Use `FAILED_PRECONDITION` if the client should not retry until
+	//      the system state has been explicitly fixed.  E.g., if an "rmdir"
+	//      fails because the directory is non-empty, `FAILED_PRECONDITION`
+	//      should be returned since the client should not retry unless
+	//      the files are deleted from the directory.
+	//
+	// HTTP Mapping: 400 Bad Request
+	Code_FAILED_PRECONDITION Code = 9
+	// The operation was aborted, typically due to a concurrency issue such as
+	// a sequencer check failure or transaction abort.
+	//
+	// See the guidelines above for deciding between `FAILED_PRECONDITION`,
+	// `ABORTED`, and `UNAVAILABLE`.
+	//
+	// HTTP Mapping: 409 Conflict
+	Code_ABORTED Code = 10
+	// The operation was attempted past the valid range.  E.g., seeking or
+	// reading past end-of-file.
+	//
+	// Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
+	// be fixed if the system state changes. For example, a 32-bit file
+	// system will generate `INVALID_ARGUMENT` if asked to read at an
+	// offset that is not in the range [0,2^32-1], but it will generate
+	// `OUT_OF_RANGE` if asked to read from an offset past the current
+	// file size.
+	//
+	// There is a fair bit of overlap between `FAILED_PRECONDITION` and
+	// `OUT_OF_RANGE`.  We recommend using `OUT_OF_RANGE` (the more specific
+	// error) when it applies so that callers who are iterating through
+	// a space can easily look for an `OUT_OF_RANGE` error to detect when
+	// they are done.
+	//
+	// HTTP Mapping: 400 Bad Request
+	Code_OUT_OF_RANGE Code = 11
+	// The operation is not implemented or is not supported/enabled in this
+	// service.
+	//
+	// HTTP Mapping: 501 Not Implemented
+	Code_UNIMPLEMENTED Code = 12
+	// Internal errors.  This means that some invariants expected by the
+	// underlying system have been broken.  This error code is reserved
+	// for serious errors.
+	//
+	// HTTP Mapping: 500 Internal Server Error
+	Code_INTERNAL Code = 13
+	// The service is currently unavailable.  This is most likely a
+	// transient condition, which can be corrected by retrying with
+	// a backoff.
+	//
+	// See the guidelines above for deciding between `FAILED_PRECONDITION`,
+	// `ABORTED`, and `UNAVAILABLE`.
+	//
+	// HTTP Mapping: 503 Service Unavailable
+	Code_UNAVAILABLE Code = 14
+	// Unrecoverable data loss or corruption.
+	//
+	// HTTP Mapping: 500 Internal Server Error
+	Code_DATA_LOSS Code = 15
+)
+
+var Code_name = map[int32]string{
+	0:  "OK",
+	1:  "CANCELLED",
+	2:  "UNKNOWN",
+	3:  "INVALID_ARGUMENT",
+	4:  "DEADLINE_EXCEEDED",
+	5:  "NOT_FOUND",
+	6:  "ALREADY_EXISTS",
+	7:  "PERMISSION_DENIED",
+	16: "UNAUTHENTICATED",
+	8:  "RESOURCE_EXHAUSTED",
+	9:  "FAILED_PRECONDITION",
+	10: "ABORTED",
+	11: "OUT_OF_RANGE",
+	12: "UNIMPLEMENTED",
+	13: "INTERNAL",
+	14: "UNAVAILABLE",
+	15: "DATA_LOSS",
+}
+var Code_value = map[string]int32{
+	"OK":                  0,
+	"CANCELLED":           1,
+	"UNKNOWN":             2,
+	"INVALID_ARGUMENT":    3,
+	"DEADLINE_EXCEEDED":   4,
+	"NOT_FOUND":           5,
+	"ALREADY_EXISTS":      6,
+	"PERMISSION_DENIED":   7,
+	"UNAUTHENTICATED":     16,
+	"RESOURCE_EXHAUSTED":  8,
+	"FAILED_PRECONDITION": 9,
+	"ABORTED":             10,
+	"OUT_OF_RANGE":        11,
+	"UNIMPLEMENTED":       12,
+	"INTERNAL":            13,
+	"UNAVAILABLE":         14,
+	"DATA_LOSS":           15,
+}
+
+func (x Code) String() string {
+	return proto.EnumName(Code_name, int32(x))
+}
+func (Code) EnumDescriptor() ([]byte, []int) { return fileDescriptorCode, []int{0} }
+
+func init() {
+	proto.RegisterEnum("google.rpc.Code", Code_name, Code_value)
+}
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/protobuf/google/rpc/code.proto", fileDescriptorCode)
+}
+
+var fileDescriptorCode = []byte{
+	// 405 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xbb, 0x72, 0x13, 0x31,
+	0x14, 0x86, 0xbd, 0x4e, 0x70, 0x62, 0xf9, 0x76, 0xa2, 0x70, 0xe9, 0xf6, 0x01, 0x28, 0xec, 0x82,
+	0x86, 0x19, 0x0a, 0xe6, 0x78, 0x75, 0x9c, 0x68, 0x22, 0x1f, 0xed, 0x68, 0xa5, 0x10, 0x68, 0x76,
+	0xf0, 0xda, 0x98, 0xcc, 0x90, 0xac, 0x67, 0xc7, 0xee, 0x79, 0x16, 0x5e, 0x82, 0x57, 0x48, 0x49,
+	0x49, 0x49, 0xfc, 0x24, 0x8c, 0x4c, 0x01, 0x35, 0x9d, 0xe6, 0xff, 0x75, 0x2e, 0xff, 0x77, 0xc4,
+	0xdb, 0xf5, 0xed, 0xf6, 0xf3, 0x6e, 0x31, 0xae, 0xea, 0xbb, 0x49, 0x55, 0xdf, 0x6f, 0x3f, 0xde,
+	0xde, 0xaf, 0x9a, 0xe5, 0xbf, 0xcf, 0x4d, 0x53, 0x6f, 0xeb, 0xc5, 0xee, 0xd3, 0x64, 0x5d, 0xd7,
+	0xeb, 0x2f, 0xab, 0x49, 0xb3, 0xa9, 0x26, 0x55, 0xbd, 0x5c, 0x8d, 0x0f, 0x86, 0x14, 0x7f, 0xe4,
+	0x71, 0xb3, 0xa9, 0x5e, 0x7e, 0x6f, 0x8b, 0xe3, 0xac, 0x5e, 0xae, 0x64, 0x47, 0xb4, 0xed, 0x15,
+	0xb4, 0xe4, 0x40, 0x74, 0x33, 0xe4, 0x8c, 0x8c, 0x21, 0x05, 0x89, 0xec, 0x89, 0x93, 0xc0, 0x57,
+	0x6c, 0xdf, 0x31, 0xb4, 0xe5, 0x53, 0x01, 0x9a, 0xaf, 0xd1, 0x68, 0x55, 0xa2, 0xbb, 0x08, 0x73,
+	0x62, 0x0f, 0x47, 0xf2, 0x99, 0x38, 0x53, 0x84, 0xca, 0x68, 0xa6, 0x92, 0x6e, 0x32, 0x22, 0x45,
+	0x0a, 0x8e, 0x63, 0x23, 0xb6, 0xbe, 0x9c, 0xd9, 0xc0, 0x0a, 0x9e, 0x48, 0x29, 0x86, 0x68, 0x1c,
+	0xa1, 0x7a, 0x5f, 0xd2, 0x8d, 0x2e, 0x7c, 0x01, 0x9d, 0x58, 0x99, 0x93, 0x9b, 0xeb, 0xa2, 0xd0,
+	0x96, 0x4b, 0x45, 0xac, 0x49, 0xc1, 0x89, 0x3c, 0x17, 0xa3, 0xc0, 0x18, 0xfc, 0x25, 0xb1, 0xd7,
+	0x19, 0x7a, 0x52, 0x00, 0xf2, 0xb9, 0x90, 0x8e, 0x0a, 0x1b, 0x5c, 0x16, 0xa7, 0x5c, 0x62, 0x28,
+	0xa2, 0x7e, 0x2a, 0x5f, 0x88, 0xf3, 0x19, 0x6a, 0x43, 0xaa, 0xcc, 0x1d, 0x65, 0x96, 0x95, 0xf6,
+	0xda, 0x32, 0x74, 0xe3, 0xe6, 0x38, 0xb5, 0x2e, 0xfe, 0x12, 0x12, 0x44, 0xdf, 0x06, 0x5f, 0xda,
+	0x59, 0xe9, 0x90, 0x2f, 0x08, 0x7a, 0xf2, 0x4c, 0x0c, 0x02, 0xeb, 0x79, 0x6e, 0x28, 0xc6, 0x20,
+	0x05, 0x7d, 0xd9, 0x17, 0xa7, 0x9a, 0x3d, 0x39, 0x46, 0x03, 0x03, 0x39, 0x12, 0xbd, 0xc0, 0x78,
+	0x8d, 0xda, 0xe0, 0xd4, 0x10, 0x0c, 0x63, 0x20, 0x85, 0x1e, 0x4b, 0x63, 0x8b, 0x02, 0x46, 0xd3,
+	0xdd, 0xc3, 0x63, 0xda, 0xfa, 0xf9, 0x98, 0xb6, 0xbe, 0xee, 0xd3, 0xe4, 0x61, 0x9f, 0x26, 0x3f,
+	0xf6, 0x69, 0xf2, 0x6b, 0x9f, 0x26, 0x62, 0x58, 0xd5, 0x77, 0xe3, 0xbf, 0x8c, 0xa7, 0xdd, 0x08,
+	0x38, 0x8f, 0xe8, 0xf3, 0xe4, 0xc3, 0xeb, 0xff, 0xb9, 0xde, 0x9b, 0x66, 0x53, 0x7d, 0x6b, 0x1f,
+	0xb9, 0x3c, 0x5b, 0x74, 0x0e, 0xf6, 0xab, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf2, 0x0a, 0x2d,
+	0x67, 0x06, 0x02, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.proto b/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.proto
new file mode 100644
index 0000000..adf4704
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/code.proto
@@ -0,0 +1,186 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+option go_package = "github.com/containerd/containerd/protobuf/google/rpc;rpc";
+option java_multiple_files = true;
+option java_outer_classname = "CodeProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+
+// The canonical error codes for Google APIs.
+//
+//
+// Sometimes multiple error codes may apply.  Services should return
+// the most specific error code that applies.  For example, prefer
+// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
+// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
+enum Code {
+  // Not an error; returned on success
+  //
+  // HTTP Mapping: 200 OK
+  OK = 0;
+
+  // The operation was cancelled, typically by the caller.
+  //
+  // HTTP Mapping: 499 Client Closed Request
+  CANCELLED = 1;
+
+  // Unknown error.  For example, this error may be returned when
+  // a `Status` value received from another address space belongs to
+  // an error space that is not known in this address space.  Also
+  // errors raised by APIs that do not return enough error information
+  // may be converted to this error.
+  //
+  // HTTP Mapping: 500 Internal Server Error
+  UNKNOWN = 2;
+
+  // The client specified an invalid argument.  Note that this differs
+  // from `FAILED_PRECONDITION`.  `INVALID_ARGUMENT` indicates arguments
+  // that are problematic regardless of the state of the system
+  // (e.g., a malformed file name).
+  //
+  // HTTP Mapping: 400 Bad Request
+  INVALID_ARGUMENT = 3;
+
+  // The deadline expired before the operation could complete. For operations
+  // that change the state of the system, this error may be returned
+  // even if the operation has completed successfully.  For example, a
+  // successful response from a server could have been delayed long
+  // enough for the deadline to expire.
+  //
+  // HTTP Mapping: 504 Gateway Timeout
+  DEADLINE_EXCEEDED = 4;
+
+  // Some requested entity (e.g., file or directory) was not found.
+  //
+  // Note to server developers: if a request is denied for an entire class
+  // of users, such as gradual feature rollout or undocumented whitelist,
+  // `NOT_FOUND` may be used. If a request is denied for some users within
+  // a class of users, such as user-based access control, `PERMISSION_DENIED`
+  // must be used.
+  //
+  // HTTP Mapping: 404 Not Found
+  NOT_FOUND = 5;
+
+  // The entity that a client attempted to create (e.g., file or directory)
+  // already exists.
+  //
+  // HTTP Mapping: 409 Conflict
+  ALREADY_EXISTS = 6;
+
+  // The caller does not have permission to execute the specified
+  // operation. `PERMISSION_DENIED` must not be used for rejections
+  // caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
+  // instead for those errors). `PERMISSION_DENIED` must not be
+  // used if the caller can not be identified (use `UNAUTHENTICATED`
+  // instead for those errors). This error code does not imply the
+  // request is valid or the requested entity exists or satisfies
+  // other pre-conditions.
+  //
+  // HTTP Mapping: 403 Forbidden
+  PERMISSION_DENIED = 7;
+
+  // The request does not have valid authentication credentials for the
+  // operation.
+  //
+  // HTTP Mapping: 401 Unauthorized
+  UNAUTHENTICATED = 16;
+
+  // Some resource has been exhausted, perhaps a per-user quota, or
+  // perhaps the entire file system is out of space.
+  //
+  // HTTP Mapping: 429 Too Many Requests
+  RESOURCE_EXHAUSTED = 8;
+
+  // The operation was rejected because the system is not in a state
+  // required for the operation's execution.  For example, the directory
+  // to be deleted is non-empty, an rmdir operation is applied to
+  // a non-directory, etc.
+  //
+  // Service implementors can use the following guidelines to decide
+  // between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
+  //  (a) Use `UNAVAILABLE` if the client can retry just the failing call.
+  //  (b) Use `ABORTED` if the client should retry at a higher level
+  //      (e.g., when a client-specified test-and-set fails, indicating the
+  //      client should restart a read-modify-write sequence).
+  //  (c) Use `FAILED_PRECONDITION` if the client should not retry until
+  //      the system state has been explicitly fixed.  E.g., if an "rmdir"
+  //      fails because the directory is non-empty, `FAILED_PRECONDITION`
+  //      should be returned since the client should not retry unless
+  //      the files are deleted from the directory.
+  //
+  // HTTP Mapping: 400 Bad Request
+  FAILED_PRECONDITION = 9;
+
+  // The operation was aborted, typically due to a concurrency issue such as
+  // a sequencer check failure or transaction abort.
+  //
+  // See the guidelines above for deciding between `FAILED_PRECONDITION`,
+  // `ABORTED`, and `UNAVAILABLE`.
+  //
+  // HTTP Mapping: 409 Conflict
+  ABORTED = 10;
+
+  // The operation was attempted past the valid range.  E.g., seeking or
+  // reading past end-of-file.
+  //
+  // Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
+  // be fixed if the system state changes. For example, a 32-bit file
+  // system will generate `INVALID_ARGUMENT` if asked to read at an
+  // offset that is not in the range [0,2^32-1], but it will generate
+  // `OUT_OF_RANGE` if asked to read from an offset past the current
+  // file size.
+  //
+  // There is a fair bit of overlap between `FAILED_PRECONDITION` and
+  // `OUT_OF_RANGE`.  We recommend using `OUT_OF_RANGE` (the more specific
+  // error) when it applies so that callers who are iterating through
+  // a space can easily look for an `OUT_OF_RANGE` error to detect when
+  // they are done.
+  //
+  // HTTP Mapping: 400 Bad Request
+  OUT_OF_RANGE = 11;
+
+  // The operation is not implemented or is not supported/enabled in this
+  // service.
+  //
+  // HTTP Mapping: 501 Not Implemented
+  UNIMPLEMENTED = 12;
+
+  // Internal errors.  This means that some invariants expected by the
+  // underlying system have been broken.  This error code is reserved
+  // for serious errors.
+  //
+  // HTTP Mapping: 500 Internal Server Error
+  INTERNAL = 13;
+
+  // The service is currently unavailable.  This is most likely a
+  // transient condition, which can be corrected by retrying with
+  // a backoff.
+  //
+  // See the guidelines above for deciding between `FAILED_PRECONDITION`,
+  // `ABORTED`, and `UNAVAILABLE`.
+  //
+  // HTTP Mapping: 503 Service Unavailable
+  UNAVAILABLE = 14;
+
+  // Unrecoverable data loss or corruption.
+  //
+  // HTTP Mapping: 500 Internal Server Error
+  DATA_LOSS = 15;
+}
diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/doc.go b/vendor/github.com/containerd/containerd/protobuf/google/rpc/doc.go
new file mode 100644
index 0000000..9ab1e3e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/doc.go
@@ -0,0 +1 @@
+package rpc
diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.pb.go b/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.pb.go
new file mode 100644
index 0000000..a61229d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.pb.go
@@ -0,0 +1,2555 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/protobuf/google/rpc/error_details.proto
+// DO NOT EDIT!
+
+package rpc
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// Describes when the clients can retry a failed request. Clients could ignore
+// the recommendation here or retry when this information is missing from error
+// responses.
+//
+// It's always recommended that clients should use exponential backoff when
+// retrying.
+//
+// Clients should wait until `retry_delay` amount of time has passed since
+// receiving the error response before retrying.  If retrying requests also
+// fail, clients should use an exponential backoff scheme to gradually increase
+// the delay between retries based on `retry_delay`, until either a maximum
+// number of retires have been reached or a maximum retry delay cap has been
+// reached.
+type RetryInfo struct {
+	// Clients should wait at least this long between retrying the same request.
+	RetryDelay *google_protobuf.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay" json:"retry_delay,omitempty"`
+}
+
+func (m *RetryInfo) Reset()                    { *m = RetryInfo{} }
+func (*RetryInfo) ProtoMessage()               {}
+func (*RetryInfo) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{0} }
+
+// Describes additional debugging info.
+type DebugInfo struct {
+	// The stack trace entries indicating where the error occurred.
+	StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries" json:"stack_entries,omitempty"`
+	// Additional debugging information provided by the server.
+	Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
+}
+
+func (m *DebugInfo) Reset()                    { *m = DebugInfo{} }
+func (*DebugInfo) ProtoMessage()               {}
+func (*DebugInfo) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{1} }
+
+// Describes how a quota check failed.
+//
+// For example if a daily limit was exceeded for the calling project,
+// a service could respond with a QuotaFailure detail containing the project
+// id and the description of the quota limit that was exceeded.  If the
+// calling project hasn't enabled the service in the developer console, then
+// a service could respond with the project id and set `service_disabled`
+// to true.
+//
+// Also see RetryDetail and Help types for other details about handling a
+// quota failure.
+type QuotaFailure struct {
+	// Describes all quota violations.
+	Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations" json:"violations,omitempty"`
+}
+
+func (m *QuotaFailure) Reset()                    { *m = QuotaFailure{} }
+func (*QuotaFailure) ProtoMessage()               {}
+func (*QuotaFailure) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{2} }
+
+// A message type used to describe a single quota violation.  For example, a
+// daily quota or a custom quota that was exceeded.
+type QuotaFailure_Violation struct {
+	// The subject on which the quota check failed.
+	// For example, "clientip:<ip address of client>" or "project:<Google
+	// developer project id>".
+	Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"`
+	// A description of how the quota check failed. Clients can use this
+	// description to find more about the quota configuration in the service's
+	// public documentation, or find the relevant quota limit to adjust through
+	// developer console.
+	//
+	// For example: "Service disabled" or "Daily Limit for read operations
+	// exceeded".
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (m *QuotaFailure_Violation) Reset()      { *m = QuotaFailure_Violation{} }
+func (*QuotaFailure_Violation) ProtoMessage() {}
+func (*QuotaFailure_Violation) Descriptor() ([]byte, []int) {
+	return fileDescriptorErrorDetails, []int{2, 0}
+}
+
+// Describes what preconditions have failed.
+//
+// For example, if an RPC failed because it required the Terms of Service to be
+// acknowledged, it could list the terms of service violation in the
+// PreconditionFailure message.
+type PreconditionFailure struct {
+	// Describes all precondition violations.
+	Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations" json:"violations,omitempty"`
+}
+
+func (m *PreconditionFailure) Reset()                    { *m = PreconditionFailure{} }
+func (*PreconditionFailure) ProtoMessage()               {}
+func (*PreconditionFailure) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{3} }
+
+// A message type used to describe a single precondition failure.
+type PreconditionFailure_Violation struct {
+	// The type of PreconditionFailure. We recommend using a service-specific
+	// enum type to define the supported precondition violation types. For
+	// example, "TOS" for "Terms of Service violation".
+	Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+	// The subject, relative to the type, that failed.
+	// For example, "google.com/cloud" relative to the "TOS" type would
+	// indicate which terms of service is being referenced.
+	Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"`
+	// A description of how the precondition failed. Developers can use this
+	// description to understand how to fix the failure.
+	//
+	// For example: "Terms of service not accepted".
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (m *PreconditionFailure_Violation) Reset()      { *m = PreconditionFailure_Violation{} }
+func (*PreconditionFailure_Violation) ProtoMessage() {}
+func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) {
+	return fileDescriptorErrorDetails, []int{3, 0}
+}
+
+// Describes violations in a client request. This error type focuses on the
+// syntactic aspects of the request.
+type BadRequest struct {
+	// Describes all violations in a client request.
+	FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations" json:"field_violations,omitempty"`
+}
+
+func (m *BadRequest) Reset()                    { *m = BadRequest{} }
+func (*BadRequest) ProtoMessage()               {}
+func (*BadRequest) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{4} }
+
+// A message type used to describe a single bad request field.
+type BadRequest_FieldViolation struct {
+	// A path leading to a field in the request body. The value will be a
+	// sequence of dot-separated identifiers that identify a protocol buffer
+	// field. E.g., "field_violations.field" would identify this field.
+	Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
+	// A description of why the request element is bad.
+	Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (m *BadRequest_FieldViolation) Reset()      { *m = BadRequest_FieldViolation{} }
+func (*BadRequest_FieldViolation) ProtoMessage() {}
+func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) {
+	return fileDescriptorErrorDetails, []int{4, 0}
+}
+
+// Contains metadata about the request that clients can attach when filing a bug
+// or providing other forms of feedback.
+type RequestInfo struct {
+	// An opaque string that should only be interpreted by the service generating
+	// it. For example, it can be used to identify requests in the service's logs.
+	RequestID string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
+	// Any data that was used to serve this request. For example, an encrypted
+	// stack trace that can be sent back to the service provider for debugging.
+	ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"`
+}
+
+func (m *RequestInfo) Reset()                    { *m = RequestInfo{} }
+func (*RequestInfo) ProtoMessage()               {}
+func (*RequestInfo) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{5} }
+
+// Describes the resource that is being accessed.
+type ResourceInfo struct {
+	// A name for the type of resource being accessed, e.g. "sql table",
+	// "cloud storage bucket", "file", "Google calendar"; or the type URL
+	// of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic".
+	ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"`
+	// The name of the resource being accessed.  For example, a shared calendar
+	// name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
+	// error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
+	ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"`
+	// The owner of the resource (optional).
+	// For example, "user:<owner email>" or "project:<Google developer project
+	// id>".
+	Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"`
+	// Describes what error is encountered when accessing this resource.
+	// For example, updating a cloud project may require the `writer` permission
+	// on the developer console project.
+	Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"`
+}
+
+func (m *ResourceInfo) Reset()                    { *m = ResourceInfo{} }
+func (*ResourceInfo) ProtoMessage()               {}
+func (*ResourceInfo) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{6} }
+
+// Provides links to documentation or for performing an out of band action.
+//
+// For example, if a quota check failed with an error indicating the calling
+// project hasn't enabled the accessed service, this can contain a URL pointing
+// directly to the right place in the developer console to flip the bit.
+type Help struct {
+	// URL(s) pointing to additional information on handling the current error.
+	Links []*Help_Link `protobuf:"bytes,1,rep,name=links" json:"links,omitempty"`
+}
+
+func (m *Help) Reset()                    { *m = Help{} }
+func (*Help) ProtoMessage()               {}
+func (*Help) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{7} }
+
+// Describes a URL link.
+type Help_Link struct {
+	// Describes what the link offers.
+	Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+	// The URL of the link.
+	Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"`
+}
+
+func (m *Help_Link) Reset()                    { *m = Help_Link{} }
+func (*Help_Link) ProtoMessage()               {}
+func (*Help_Link) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{7, 0} }
+
+// Provides a localized error message that is safe to return to the user
+// which can be attached to an RPC error.
+type LocalizedMessage struct {
+	// The locale used following the specification defined at
+	// http://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+	// Examples are: "en-US", "fr-CH", "es-MX"
+	Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"`
+	// The localized error message in the above locale.
+	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+}
+
+func (m *LocalizedMessage) Reset()                    { *m = LocalizedMessage{} }
+func (*LocalizedMessage) ProtoMessage()               {}
+func (*LocalizedMessage) Descriptor() ([]byte, []int) { return fileDescriptorErrorDetails, []int{8} }
+
+func init() {
+	proto.RegisterType((*RetryInfo)(nil), "google.rpc.RetryInfo")
+	proto.RegisterType((*DebugInfo)(nil), "google.rpc.DebugInfo")
+	proto.RegisterType((*QuotaFailure)(nil), "google.rpc.QuotaFailure")
+	proto.RegisterType((*QuotaFailure_Violation)(nil), "google.rpc.QuotaFailure.Violation")
+	proto.RegisterType((*PreconditionFailure)(nil), "google.rpc.PreconditionFailure")
+	proto.RegisterType((*PreconditionFailure_Violation)(nil), "google.rpc.PreconditionFailure.Violation")
+	proto.RegisterType((*BadRequest)(nil), "google.rpc.BadRequest")
+	proto.RegisterType((*BadRequest_FieldViolation)(nil), "google.rpc.BadRequest.FieldViolation")
+	proto.RegisterType((*RequestInfo)(nil), "google.rpc.RequestInfo")
+	proto.RegisterType((*ResourceInfo)(nil), "google.rpc.ResourceInfo")
+	proto.RegisterType((*Help)(nil), "google.rpc.Help")
+	proto.RegisterType((*Help_Link)(nil), "google.rpc.Help.Link")
+	proto.RegisterType((*LocalizedMessage)(nil), "google.rpc.LocalizedMessage")
+}
+func (m *RetryInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RetryInfo) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.RetryDelay != nil {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(m.RetryDelay.Size()))
+		n1, err := m.RetryDelay.MarshalTo(dAtA[i:])
+		if err != nil {
+			return 0, err
+		}
+		i += n1
+	}
+	return i, nil
+}
+
+func (m *DebugInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DebugInfo) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.StackEntries) > 0 {
+		for _, s := range m.StackEntries {
+			dAtA[i] = 0xa
+			i++
+			l = len(s)
+			for l >= 1<<7 {
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
+				l >>= 7
+				i++
+			}
+			dAtA[i] = uint8(l)
+			i++
+			i += copy(dAtA[i:], s)
+		}
+	}
+	if len(m.Detail) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Detail)))
+		i += copy(dAtA[i:], m.Detail)
+	}
+	return i, nil
+}
+
+func (m *QuotaFailure) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *QuotaFailure) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Violations) > 0 {
+		for _, msg := range m.Violations {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintErrorDetails(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *QuotaFailure_Violation) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *QuotaFailure_Violation) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Subject) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Subject)))
+		i += copy(dAtA[i:], m.Subject)
+	}
+	if len(m.Description) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
+		i += copy(dAtA[i:], m.Description)
+	}
+	return i, nil
+}
+
+func (m *PreconditionFailure) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PreconditionFailure) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Violations) > 0 {
+		for _, msg := range m.Violations {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintErrorDetails(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *PreconditionFailure_Violation) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PreconditionFailure_Violation) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Type) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Type)))
+		i += copy(dAtA[i:], m.Type)
+	}
+	if len(m.Subject) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Subject)))
+		i += copy(dAtA[i:], m.Subject)
+	}
+	if len(m.Description) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
+		i += copy(dAtA[i:], m.Description)
+	}
+	return i, nil
+}
+
+func (m *BadRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BadRequest) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.FieldViolations) > 0 {
+		for _, msg := range m.FieldViolations {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintErrorDetails(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *BadRequest_FieldViolation) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *BadRequest_FieldViolation) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Field) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Field)))
+		i += copy(dAtA[i:], m.Field)
+	}
+	if len(m.Description) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
+		i += copy(dAtA[i:], m.Description)
+	}
+	return i, nil
+}
+
+func (m *RequestInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RequestInfo) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.RequestID) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.RequestID)))
+		i += copy(dAtA[i:], m.RequestID)
+	}
+	if len(m.ServingData) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ServingData)))
+		i += copy(dAtA[i:], m.ServingData)
+	}
+	return i, nil
+}
+
+func (m *ResourceInfo) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResourceInfo) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ResourceType) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ResourceType)))
+		i += copy(dAtA[i:], m.ResourceType)
+	}
+	if len(m.ResourceName) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.ResourceName)))
+		i += copy(dAtA[i:], m.ResourceName)
+	}
+	if len(m.Owner) > 0 {
+		dAtA[i] = 0x1a
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Owner)))
+		i += copy(dAtA[i:], m.Owner)
+	}
+	if len(m.Description) > 0 {
+		dAtA[i] = 0x22
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
+		i += copy(dAtA[i:], m.Description)
+	}
+	return i, nil
+}
+
+func (m *Help) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Help) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Links) > 0 {
+		for _, msg := range m.Links {
+			dAtA[i] = 0xa
+			i++
+			i = encodeVarintErrorDetails(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func (m *Help_Link) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Help_Link) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Description) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Description)))
+		i += copy(dAtA[i:], m.Description)
+	}
+	if len(m.Url) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Url)))
+		i += copy(dAtA[i:], m.Url)
+	}
+	return i, nil
+}
+
+func (m *LocalizedMessage) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LocalizedMessage) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.Locale) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Locale)))
+		i += copy(dAtA[i:], m.Locale)
+	}
+	if len(m.Message) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Message)))
+		i += copy(dAtA[i:], m.Message)
+	}
+	return i, nil
+}
+
+func encodeFixed64ErrorDetails(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32ErrorDetails(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintErrorDetails(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *RetryInfo) Size() (n int) {
+	var l int
+	_ = l
+	if m.RetryDelay != nil {
+		l = m.RetryDelay.Size()
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	return n
+}
+
+func (m *DebugInfo) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.StackEntries) > 0 {
+		for _, s := range m.StackEntries {
+			l = len(s)
+			n += 1 + l + sovErrorDetails(uint64(l))
+		}
+	}
+	l = len(m.Detail)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	return n
+}
+
+func (m *QuotaFailure) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Violations) > 0 {
+		for _, e := range m.Violations {
+			l = e.Size()
+			n += 1 + l + sovErrorDetails(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *QuotaFailure_Violation) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Subject)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	l = len(m.Description)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	return n
+}
+
+func (m *PreconditionFailure) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Violations) > 0 {
+		for _, e := range m.Violations {
+			l = e.Size()
+			n += 1 + l + sovErrorDetails(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *PreconditionFailure_Violation) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Type)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	l = len(m.Subject)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	l = len(m.Description)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	return n
+}
+
+func (m *BadRequest) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.FieldViolations) > 0 {
+		for _, e := range m.FieldViolations {
+			l = e.Size()
+			n += 1 + l + sovErrorDetails(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *BadRequest_FieldViolation) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Field)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	l = len(m.Description)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	return n
+}
+
+func (m *RequestInfo) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.RequestID)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	l = len(m.ServingData)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	return n
+}
+
+func (m *ResourceInfo) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ResourceType)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	l = len(m.ResourceName)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	l = len(m.Owner)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	l = len(m.Description)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	return n
+}
+
+func (m *Help) Size() (n int) {
+	var l int
+	_ = l
+	if len(m.Links) > 0 {
+		for _, e := range m.Links {
+			l = e.Size()
+			n += 1 + l + sovErrorDetails(uint64(l))
+		}
+	}
+	return n
+}
+
+func (m *Help_Link) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Description)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	l = len(m.Url)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	return n
+}
+
+func (m *LocalizedMessage) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.Locale)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	l = len(m.Message)
+	if l > 0 {
+		n += 1 + l + sovErrorDetails(uint64(l))
+	}
+	return n
+}
+
+func sovErrorDetails(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozErrorDetails(x uint64) (n int) {
+	return sovErrorDetails(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *RetryInfo) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&RetryInfo{`,
+		`RetryDelay:` + strings.Replace(fmt.Sprintf("%v", this.RetryDelay), "Duration", "google_protobuf.Duration", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *DebugInfo) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&DebugInfo{`,
+		`StackEntries:` + fmt.Sprintf("%v", this.StackEntries) + `,`,
+		`Detail:` + fmt.Sprintf("%v", this.Detail) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *QuotaFailure) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&QuotaFailure{`,
+		`Violations:` + strings.Replace(fmt.Sprintf("%v", this.Violations), "QuotaFailure_Violation", "QuotaFailure_Violation", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *QuotaFailure_Violation) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&QuotaFailure_Violation{`,
+		`Subject:` + fmt.Sprintf("%v", this.Subject) + `,`,
+		`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PreconditionFailure) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PreconditionFailure{`,
+		`Violations:` + strings.Replace(fmt.Sprintf("%v", this.Violations), "PreconditionFailure_Violation", "PreconditionFailure_Violation", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *PreconditionFailure_Violation) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&PreconditionFailure_Violation{`,
+		`Type:` + fmt.Sprintf("%v", this.Type) + `,`,
+		`Subject:` + fmt.Sprintf("%v", this.Subject) + `,`,
+		`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *BadRequest) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&BadRequest{`,
+		`FieldViolations:` + strings.Replace(fmt.Sprintf("%v", this.FieldViolations), "BadRequest_FieldViolation", "BadRequest_FieldViolation", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *BadRequest_FieldViolation) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&BadRequest_FieldViolation{`,
+		`Field:` + fmt.Sprintf("%v", this.Field) + `,`,
+		`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *RequestInfo) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&RequestInfo{`,
+		`RequestID:` + fmt.Sprintf("%v", this.RequestID) + `,`,
+		`ServingData:` + fmt.Sprintf("%v", this.ServingData) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ResourceInfo) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ResourceInfo{`,
+		`ResourceType:` + fmt.Sprintf("%v", this.ResourceType) + `,`,
+		`ResourceName:` + fmt.Sprintf("%v", this.ResourceName) + `,`,
+		`Owner:` + fmt.Sprintf("%v", this.Owner) + `,`,
+		`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Help) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Help{`,
+		`Links:` + strings.Replace(fmt.Sprintf("%v", this.Links), "Help_Link", "Help_Link", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *Help_Link) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Help_Link{`,
+		`Description:` + fmt.Sprintf("%v", this.Description) + `,`,
+		`Url:` + fmt.Sprintf("%v", this.Url) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *LocalizedMessage) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&LocalizedMessage{`,
+		`Locale:` + fmt.Sprintf("%v", this.Locale) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringErrorDetails(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *RetryInfo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RetryInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RetryInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RetryDelay", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.RetryDelay == nil {
+				m.RetryDelay = &google_protobuf.Duration{}
+			}
+			if err := m.RetryDelay.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DebugInfo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DebugInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DebugInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StackEntries", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.StackEntries = append(m.StackEntries, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Detail", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Detail = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *QuotaFailure) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: QuotaFailure: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: QuotaFailure: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Violations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Violations = append(m.Violations, &QuotaFailure_Violation{})
+			if err := m.Violations[len(m.Violations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *QuotaFailure_Violation) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Violation: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Violation: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Subject = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Description = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PreconditionFailure) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PreconditionFailure: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PreconditionFailure: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Violations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Violations = append(m.Violations, &PreconditionFailure_Violation{})
+			if err := m.Violations[len(m.Violations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PreconditionFailure_Violation) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Violation: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Violation: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Type = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Subject", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Subject = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Description = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BadRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: BadRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: BadRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field FieldViolations", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.FieldViolations = append(m.FieldViolations, &BadRequest_FieldViolation{})
+			if err := m.FieldViolations[len(m.FieldViolations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *BadRequest_FieldViolation) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: FieldViolation: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: FieldViolation: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Field = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Description = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RequestInfo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RequestInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RequestInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestID", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RequestID = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ServingData", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ServingData = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResourceInfo) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResourceInfo: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResourceInfo: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceType", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceType = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResourceName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ResourceName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Owner = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Description = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Help) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Help: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Help: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Links = append(m.Links, &Help_Link{})
+			if err := m.Links[len(m.Links)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Help_Link) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Link: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Description = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Url = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LocalizedMessage) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LocalizedMessage: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LocalizedMessage: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Locale", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Locale = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipErrorDetails(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthErrorDetails
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipErrorDetails(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowErrorDetails
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowErrorDetails
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthErrorDetails
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowErrorDetails
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipErrorDetails(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthErrorDetails = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowErrorDetails   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/protobuf/google/rpc/error_details.proto", fileDescriptorErrorDetails)
+}
+
+var fileDescriptorErrorDetails = []byte{
+	// 637 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4f, 0x6f, 0xd3, 0x30,
+	0x18, 0xc6, 0xe7, 0xb5, 0x1b, 0xca, 0xdb, 0x6e, 0x8c, 0xf0, 0x47, 0xa5, 0x87, 0x50, 0x82, 0x90,
+	0x86, 0x40, 0xa9, 0x34, 0x2e, 0x68, 0xdc, 0x4a, 0xb6, 0x75, 0xd2, 0x40, 0x25, 0x42, 0x1c, 0x40,
+	0x22, 0x72, 0x93, 0xb7, 0xc5, 0x2c, 0x8d, 0x83, 0xe3, 0x0c, 0x0d, 0x09, 0x89, 0x8f, 0xc0, 0x9d,
+	0x1b, 0x27, 0xbe, 0x04, 0xf7, 0x1d, 0x39, 0x72, 0x42, 0xac, 0x9f, 0x04, 0x39, 0x71, 0xd6, 0x6c,
+	0x1d, 0x08, 0x71, 0xf3, 0xf3, 0xfa, 0xe7, 0x27, 0xcf, 0x6b, 0x3b, 0x86, 0xfe, 0x98, 0xc9, 0xd7,
+	0xd9, 0xd0, 0x09, 0xf8, 0xa4, 0x1b, 0xf0, 0x58, 0x52, 0x16, 0xa3, 0x08, 0xab, 0xc3, 0x44, 0x70,
+	0xc9, 0x87, 0xd9, 0xa8, 0x3b, 0xe6, 0x7c, 0x1c, 0x61, 0x57, 0x24, 0x41, 0x17, 0x85, 0xe0, 0xc2,
+	0x0f, 0x51, 0x52, 0x16, 0xa5, 0x4e, 0x4e, 0x98, 0x50, 0xcc, 0x3b, 0x22, 0x09, 0xda, 0x96, 0x66,
+	0x4f, 0xd6, 0x86, 0x99, 0xa0, 0x92, 0xf1, 0xb8, 0x60, 0xed, 0x1d, 0x30, 0x3c, 0x94, 0xe2, 0x70,
+	0x37, 0x1e, 0x71, 0x73, 0x13, 0x1a, 0x42, 0x09, 0x3f, 0xc4, 0x88, 0x1e, 0xb6, 0x48, 0x87, 0xac,
+	0x37, 0x36, 0xae, 0x3b, 0xda, 0xae, 0xb4, 0x70, 0x5c, 0x6d, 0xe1, 0x41, 0x4e, 0xbb, 0x0a, 0xb6,
+	0xfb, 0x60, 0xb8, 0x38, 0xcc, 0xc6, 0xb9, 0xd1, 0x2d, 0x58, 0x49, 0x25, 0x0d, 0xf6, 0x7d, 0x8c,
+	0xa5, 0x60, 0x98, 0xb6, 0x48, 0xa7, 0xb6, 0x6e, 0x78, 0xcd, 0xbc, 0xb8, 0x55, 0xd4, 0xcc, 0x6b,
+	0xb0, 0x5c, 0xe4, 0x6e, 0x2d, 0x76, 0xc8, 0xba, 0xe1, 0x69, 0x65, 0x7f, 0x26, 0xd0, 0x7c, 0x9a,
+	0x71, 0x49, 0xb7, 0x29, 0x8b, 0x32, 0x81, 0x66, 0x0f, 0xe0, 0x80, 0xf1, 0x28, 0xff, 0x66, 0x61,
+	0xd5, 0xd8, 0xb0, 0x9d, 0x59, 0x93, 0x4e, 0x95, 0x76, 0x9e, 0x97, 0xa8, 0x57, 0x59, 0xd5, 0xde,
+	0x01, 0xe3, 0x64, 0xc2, 0x6c, 0xc1, 0x85, 0x34, 0x1b, 0xbe, 0xc1, 0x40, 0xe6, 0x3d, 0x1a, 0x5e,
+	0x29, 0xcd, 0x0e, 0x34, 0x42, 0x4c, 0x03, 0xc1, 0x12, 0x05, 0xea, 0x60, 0xd5, 0x92, 0xfd, 0x8d,
+	0xc0, 0xe5, 0x81, 0xc0, 0x80, 0xc7, 0x21, 0x53, 0x85, 0x32, 0xe4, 0xee, 0x39, 0x21, 0xef, 0x54,
+	0x43, 0x9e, 0xb3, 0xe8, 0x0f, 0x59, 0x5f, 0x56, 0xb3, 0x9a, 0x50, 0x97, 0x87, 0x09, 0xea, 0xa0,
+	0xf9, 0xb8, 0x9a, 0x7f, 0xf1, 0xaf, 0xf9, 0x6b, 0xf3, 0xf9, 0xbf, 0x12, 0x80, 0x1e, 0x0d, 0x3d,
+	0x7c, 0x9b, 0x61, 0x2a, 0xcd, 0x01, 0xac, 0x8d, 0x18, 0x46, 0xa1, 0x3f, 0x17, 0xfe, 0x76, 0x35,
+	0xfc, 0x6c, 0x85, 0xb3, 0xad, 0xf0, 0x59, 0xf0, 0x8b, 0xa3, 0x53, 0x3a, 0x6d, 0xf7, 0x61, 0xf5,
+	0x34, 0x62, 0x5e, 0x81, 0xa5, 0x1c, 0xd2, 0x3d, 0x14, 0xe2, 0x1f, 0xb6, 0xfa, 0x15, 0x34, 0xf4,
+	0x47, 0xf3, 0x4b, 0x75, 0x0f, 0x40, 0x14, 0xd2, 0x67, 0xda, 0xab, 0xb7, 0x32, 0xfd, 0x79, 0xc3,
+	0x28, 0x21, 0xd7, 0x33, 0x34, 0xb0, 0x1b, 0x9a, 0x37, 0xa1, 0x99, 0xa2, 0x38, 0x60, 0xf1, 0xd8,
+	0x0f, 0xa9, 0xa4, 0xa5, 0xbf, 0xae, 0xb9, 0x54, 0x52, 0xfb, 0x13, 0x81, 0xa6, 0x87, 0x29, 0xcf,
+	0x44, 0x80, 0xe5, 0xb5, 0x15, 0x5a, 0xfb, 0x95, 0x4d, 0x6f, 0x96, 0xc5, 0x67, 0x6a, 0xf3, 0xab,
+	0x50, 0x4c, 0x27, 0xa8, 0x9d, 0x4f, 0xa0, 0x27, 0x74, 0x82, 0xaa, 0x65, 0xfe, 0x2e, 0x46, 0xa1,
+	0x4f, 0xa0, 0x10, 0x67, 0x5b, 0xae, 0xcf, 0xb7, 0xcc, 0xa1, 0xde, 0xc7, 0x28, 0x31, 0xef, 0xc2,
+	0x52, 0xc4, 0xe2, 0xfd, 0xf2, 0x2c, 0xae, 0x56, 0xcf, 0x42, 0x01, 0xce, 0x1e, 0x8b, 0xf7, 0xbd,
+	0x82, 0x69, 0x6f, 0x42, 0x5d, 0xc9, 0xb3, 0xf6, 0x64, 0xce, 0xde, 0x5c, 0x83, 0x5a, 0x26, 0xca,
+	0xff, 0x4d, 0x0d, 0x6d, 0x17, 0xd6, 0xf6, 0x78, 0x40, 0x23, 0xf6, 0x1e, 0xc3, 0xc7, 0x98, 0xa6,
+	0x74, 0x8c, 0xea, 0xc7, 0x8c, 0x54, 0xad, 0xec, 0x5f, 0x2b, 0x75, 0xed, 0x26, 0x05, 0x52, 0x5e,
+	0x3b, 0x2d, 0x7b, 0x1f, 0x8e, 0x8e, 0xad, 0x85, 0x1f, 0xc7, 0xd6, 0xc2, 0xc7, 0xa9, 0x45, 0x8e,
+	0xa6, 0x16, 0xf9, 0x3e, 0xb5, 0xc8, 0xaf, 0xa9, 0x45, 0x60, 0x35, 0xe0, 0x93, 0x4a, 0xf8, 0xde,
+	0xa5, 0x2d, 0xf5, 0x60, 0xb9, 0xc5, 0x7b, 0x35, 0x50, 0x2f, 0xca, 0x80, 0xbc, 0x78, 0xf0, 0x3f,
+	0x4f, 0xdf, 0x43, 0x91, 0x04, 0x5f, 0x16, 0x6b, 0xde, 0xe0, 0xd1, 0x70, 0x39, 0x9f, 0xbe, 0xff,
+	0x3b, 0x00, 0x00, 0xff, 0xff, 0xfa, 0xbf, 0x78, 0x96, 0x43, 0x05, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.proto b/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.proto
new file mode 100644
index 0000000..f1b9062
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/error_details.proto
@@ -0,0 +1,200 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+import "google/protobuf/duration.proto";
+
+option go_package = "github.com/containerd/containerd/protobuf/google/rpc;rpc";
+option java_multiple_files = true;
+option java_outer_classname = "ErrorDetailsProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+
+// Describes when the clients can retry a failed request. Clients could ignore
+// the recommendation here or retry when this information is missing from error
+// responses.
+//
+// It's always recommended that clients should use exponential backoff when
+// retrying.
+//
+// Clients should wait until `retry_delay` amount of time has passed since
+// receiving the error response before retrying.  If retrying requests also
+// fail, clients should use an exponential backoff scheme to gradually increase
+// the delay between retries based on `retry_delay`, until either a maximum
+// number of retries have been reached or a maximum retry delay cap has been
+// reached.
+message RetryInfo {
+  // Clients should wait at least this long between retrying the same request.
+  google.protobuf.Duration retry_delay = 1;
+}
+
+// Describes additional debugging info.
+message DebugInfo {
+  // The stack trace entries indicating where the error occurred.
+  repeated string stack_entries = 1;
+
+  // Additional debugging information provided by the server.
+  string detail = 2;
+}
+
+// Describes how a quota check failed.
+//
+// For example if a daily limit was exceeded for the calling project,
+// a service could respond with a QuotaFailure detail containing the project
+// id and the description of the quota limit that was exceeded.  If the
+// calling project hasn't enabled the service in the developer console, then
+// a service could respond with the project id and set `service_disabled`
+// to true.
+//
+// Also see RetryDetail and Help types for other details about handling a
+// quota failure.
+message QuotaFailure {
+  // A message type used to describe a single quota violation.  For example, a
+  // daily quota or a custom quota that was exceeded.
+  message Violation {
+    // The subject on which the quota check failed.
+    // For example, "clientip:<ip address of client>" or "project:<Google
+    // developer project id>".
+    string subject = 1;
+
+    // A description of how the quota check failed. Clients can use this
+    // description to find more about the quota configuration in the service's
+    // public documentation, or find the relevant quota limit to adjust through
+    // developer console.
+    //
+    // For example: "Service disabled" or "Daily Limit for read operations
+    // exceeded".
+    string description = 2;
+  }
+
+  // Describes all quota violations.
+  repeated Violation violations = 1;
+}
+
+// Describes what preconditions have failed.
+//
+// For example, if an RPC failed because it required the Terms of Service to be
+// acknowledged, it could list the terms of service violation in the
+// PreconditionFailure message.
+message PreconditionFailure {
+  // A message type used to describe a single precondition failure.
+  message Violation {
+    // The type of PreconditionFailure. We recommend using a service-specific
+    // enum type to define the supported precondition violation types. For
+    // example, "TOS" for "Terms of Service violation".
+    string type = 1;
+
+    // The subject, relative to the type, that failed.
+    // For example, "google.com/cloud" relative to the "TOS" type would
+    // indicate which terms of service is being referenced.
+    string subject = 2;
+
+    // A description of how the precondition failed. Developers can use this
+    // description to understand how to fix the failure.
+    //
+    // For example: "Terms of service not accepted".
+    string description = 3;
+  }
+
+  // Describes all precondition violations.
+  repeated Violation violations = 1;
+}
+
+// Describes violations in a client request. This error type focuses on the
+// syntactic aspects of the request.
+message BadRequest {
+  // A message type used to describe a single bad request field.
+  message FieldViolation {
+    // A path leading to a field in the request body. The value will be a
+    // sequence of dot-separated identifiers that identify a protocol buffer
+    // field. E.g., "field_violations.field" would identify this field.
+    string field = 1;
+
+    // A description of why the request element is bad.
+    string description = 2;
+  }
+
+  // Describes all violations in a client request.
+  repeated FieldViolation field_violations = 1;
+}
+
+// Contains metadata about the request that clients can attach when filing a bug
+// or providing other forms of feedback.
+message RequestInfo {
+  // An opaque string that should only be interpreted by the service generating
+  // it. For example, it can be used to identify requests in the service's logs.
+  string request_id = 1;
+
+  // Any data that was used to serve this request. For example, an encrypted
+  // stack trace that can be sent back to the service provider for debugging.
+  string serving_data = 2;
+}
+
+// Describes the resource that is being accessed.
+message ResourceInfo {
+  // A name for the type of resource being accessed, e.g. "sql table",
+  // "cloud storage bucket", "file", "Google calendar"; or the type URL
+  // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic".
+  string resource_type = 1;
+
+  // The name of the resource being accessed.  For example, a shared calendar
+  // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current
+  // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED].
+  string resource_name = 2;
+
+  // The owner of the resource (optional).
+  // For example, "user:<owner email>" or "project:<Google developer project
+  // id>".
+  string owner = 3;
+
+  // Describes what error is encountered when accessing this resource.
+  // For example, updating a cloud project may require the `writer` permission
+  // on the developer console project.
+  string description = 4;
+}
+
+// Provides links to documentation or for performing an out of band action.
+//
+// For example, if a quota check failed with an error indicating the calling
+// project hasn't enabled the accessed service, this can contain a URL pointing
+// directly to the right place in the developer console to flip the bit.
+message Help {
+  // Describes a URL link.
+  message Link {
+    // Describes what the link offers.
+    string description = 1;
+
+    // The URL of the link.
+    string url = 2;
+  }
+
+  // URL(s) pointing to additional information on handling the current error.
+  repeated Link links = 1;
+}
+
+// Provides a localized error message that is safe to return to the user
+// which can be attached to an RPC error.
+message LocalizedMessage {
+  // The locale used following the specification defined at
+  // http://www.rfc-editor.org/rfc/bcp/bcp47.txt.
+  // Examples are: "en-US", "fr-CH", "es-MX"
+  string locale = 1;
+
+  // The localized error message in the above locale.
+  string message = 2;
+}
diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.pb.go b/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.pb.go
new file mode 100644
index 0000000..80927bf
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.pb.go
@@ -0,0 +1,468 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/protobuf/google/rpc/status.proto
+// DO NOT EDIT!
+
+package rpc
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf1 "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// The `Status` type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs. It is used by
+// [gRPC](https://github.com/grpc). The error model is designed to be:
+//
+// - Simple to use and understand for most users
+// - Flexible enough to meet unexpected needs
+//
+// # Overview
+//
+// The `Status` message contains three pieces of data: error code, error message,
+// and error details. The error code should be an enum value of
+// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed.  The
+// error message should be a developer-facing English message that helps
+// developers *understand* and *resolve* the error. If a localized user-facing
+// error message is needed, put the localized message in the error details or
+// localize it in the client. The optional error details may contain arbitrary
+// information about the error. There is a predefined set of error detail types
+// in the package `google.rpc` that can be used for common error conditions.
+//
+// # Language mapping
+//
+// The `Status` message is the logical representation of the error model, but it
+// is not necessarily the actual wire format. When the `Status` message is
+// exposed in different client libraries and different wire protocols, it can be
+// mapped differently. For example, it will likely be mapped to some exceptions
+// in Java, but more likely mapped to some error codes in C.
+//
+// # Other uses
+//
+// The error model and the `Status` message can be used in a variety of
+// environments, either with or without APIs, to provide a
+// consistent developer experience across different environments.
+//
+// Example uses of this error model include:
+//
+// - Partial errors. If a service needs to return partial errors to the client,
+//     it may embed the `Status` in the normal response to indicate the partial
+//     errors.
+//
+// - Workflow errors. A typical workflow has multiple steps. Each step may
+//     have a `Status` message for error reporting.
+//
+// - Batch operations. If a client uses batch request and batch response, the
+//     `Status` message should be used directly inside batch response, one for
+//     each error sub-response.
+//
+// - Asynchronous operations. If an API call embeds asynchronous operation
+//     results in its response, the status of those operations should be
+//     represented directly using the `Status` message.
+//
+// - Logging. If some API errors are stored in logs, the message `Status` could
+//     be used directly after any stripping needed for security/privacy reasons.
+type Status struct {
+	// The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+	Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+	// A developer-facing error message, which should be in English. Any
+	// user-facing error message should be localized and sent in the
+	// [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+	// A list of messages that carry the error details.  There is a common set of
+	// message types for APIs to use.
+	Details []*google_protobuf1.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
+}
+
+func (m *Status) Reset()                    { *m = Status{} }
+func (*Status) ProtoMessage()               {}
+func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorStatus, []int{0} }
+
+func init() {
+	proto.RegisterType((*Status)(nil), "google.rpc.Status")
+}
+func (m *Status) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Status) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if m.Code != 0 {
+		dAtA[i] = 0x8
+		i++
+		i = encodeVarintStatus(dAtA, i, uint64(m.Code))
+	}
+	if len(m.Message) > 0 {
+		dAtA[i] = 0x12
+		i++
+		i = encodeVarintStatus(dAtA, i, uint64(len(m.Message)))
+		i += copy(dAtA[i:], m.Message)
+	}
+	if len(m.Details) > 0 {
+		for _, msg := range m.Details {
+			dAtA[i] = 0x1a
+			i++
+			i = encodeVarintStatus(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
+	return i, nil
+}
+
+func encodeFixed64Status(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Status(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintStatus(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *Status) Size() (n int) {
+	var l int
+	_ = l
+	if m.Code != 0 {
+		n += 1 + sovStatus(uint64(m.Code))
+	}
+	l = len(m.Message)
+	if l > 0 {
+		n += 1 + l + sovStatus(uint64(l))
+	}
+	if len(m.Details) > 0 {
+		for _, e := range m.Details {
+			l = e.Size()
+			n += 1 + l + sovStatus(uint64(l))
+		}
+	}
+	return n
+}
+
+func sovStatus(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozStatus(x uint64) (n int) {
+	return sovStatus(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *Status) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&Status{`,
+		`Code:` + fmt.Sprintf("%v", this.Code) + `,`,
+		`Message:` + fmt.Sprintf("%v", this.Message) + `,`,
+		`Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "Any", "google_protobuf1.Any", 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringStatus(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *Status) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowStatus
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Status: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType)
+			}
+			m.Code = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowStatus
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Code |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowStatus
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthStatus
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Message = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowStatus
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthStatus
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Details = append(m.Details, &google_protobuf1.Any{})
+			if err := m.Details[len(m.Details)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipStatus(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthStatus
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipStatus(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowStatus
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowStatus
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowStatus
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthStatus
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowStatus
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipStatus(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthStatus = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowStatus   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/protobuf/google/rpc/status.proto", fileDescriptorStatus)
+}
+
+var fileDescriptorStatus = []byte{
+	// 236 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x4c, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x16, 0x14, 0xe5, 0x97, 0xe4, 0x27, 0x95, 0xa6, 0xe9, 0xa7, 0xe7, 0xe7, 0xa7,
+	0xe7, 0xa4, 0xea, 0x17, 0x15, 0x24, 0xeb, 0x17, 0x97, 0x24, 0x96, 0x94, 0x16, 0xeb, 0x81, 0xa5,
+	0x84, 0xb8, 0x20, 0x12, 0x7a, 0x45, 0x05, 0xc9, 0x52, 0x92, 0x50, 0x45, 0x70, 0x4d, 0x89, 0x79,
+	0x95, 0x10, 0x65, 0x4a, 0x69, 0x5c, 0x6c, 0xc1, 0x60, 0x6d, 0x42, 0x42, 0x5c, 0x2c, 0xc9, 0xf9,
+	0x29, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x60, 0xb6, 0x90, 0x04, 0x17, 0x7b, 0x6e,
+	0x6a, 0x71, 0x71, 0x62, 0x7a, 0xaa, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x8c, 0x2b, 0xa4,
+	0xc7, 0xc5, 0x9e, 0x92, 0x5a, 0x92, 0x98, 0x99, 0x53, 0x2c, 0xc1, 0xac, 0xc0, 0xac, 0xc1, 0x6d,
+	0x24, 0xa2, 0x07, 0xb5, 0x10, 0x66, 0x89, 0x9e, 0x63, 0x5e, 0x65, 0x10, 0x4c, 0x91, 0x53, 0xf9,
+	0x89, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e,
+	0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0xb9, 0xf8, 0x92, 0xf3, 0x73, 0xf5, 0x10,
+	0x8e, 0x75, 0xe2, 0x86, 0xb8, 0x27, 0x00, 0x64, 0x4c, 0x00, 0x63, 0x94, 0x05, 0x39, 0x41, 0x61,
+	0x5d, 0x54, 0x90, 0xbc, 0x88, 0x89, 0x39, 0x28, 0xc0, 0x39, 0x89, 0x0d, 0x2c, 0x6d, 0x0c, 0x08,
+	0x00, 0x00, 0xff, 0xff, 0x7d, 0xb3, 0x06, 0x51, 0x53, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.proto b/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.proto
new file mode 100644
index 0000000..55f4ea0
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/protobuf/google/rpc/status.proto
@@ -0,0 +1,92 @@
+// Copyright 2017 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.rpc;
+
+import "google/protobuf/any.proto";
+
+option go_package = "github.com/containerd/containerd/protobuf/google/rpc;rpc";
+option java_multiple_files = true;
+option java_outer_classname = "StatusProto";
+option java_package = "com.google.rpc";
+option objc_class_prefix = "RPC";
+
+
+// The `Status` type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs. It is used by
+// [gRPC](https://github.com/grpc). The error model is designed to be:
+//
+// - Simple to use and understand for most users
+// - Flexible enough to meet unexpected needs
+//
+// # Overview
+//
+// The `Status` message contains three pieces of data: error code, error message,
+// and error details. The error code should be an enum value of
+// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed.  The
+// error message should be a developer-facing English message that helps
+// developers *understand* and *resolve* the error. If a localized user-facing
+// error message is needed, put the localized message in the error details or
+// localize it in the client. The optional error details may contain arbitrary
+// information about the error. There is a predefined set of error detail types
+// in the package `google.rpc` that can be used for common error conditions.
+//
+// # Language mapping
+//
+// The `Status` message is the logical representation of the error model, but it
+// is not necessarily the actual wire format. When the `Status` message is
+// exposed in different client libraries and different wire protocols, it can be
+// mapped differently. For example, it will likely be mapped to some exceptions
+// in Java, but more likely mapped to some error codes in C.
+//
+// # Other uses
+//
+// The error model and the `Status` message can be used in a variety of
+// environments, either with or without APIs, to provide a
+// consistent developer experience across different environments.
+//
+// Example uses of this error model include:
+//
+// - Partial errors. If a service needs to return partial errors to the client,
+//     it may embed the `Status` in the normal response to indicate the partial
+//     errors.
+//
+// - Workflow errors. A typical workflow has multiple steps. Each step may
+//     have a `Status` message for error reporting.
+//
+// - Batch operations. If a client uses batch request and batch response, the
+//     `Status` message should be used directly inside batch response, one for
+//     each error sub-response.
+//
+// - Asynchronous operations. If an API call embeds asynchronous operation
+//     results in its response, the status of those operations should be
+//     represented directly using the `Status` message.
+//
+// - Logging. If some API errors are stored in logs, the message `Status` could
+//     be used directly after any stripping needed for security/privacy reasons.
+message Status {
+  // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code].
+  int32 code = 1;
+
+  // A developer-facing error message, which should be in English. Any
+  // user-facing error message should be localized and sent in the
+  // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client.
+  string message = 2;
+
+  // A list of messages that carry the error details.  There is a common set of
+  // message types for APIs to use.
+  repeated google.protobuf.Any details = 3;
+}
diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go b/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go
new file mode 100644
index 0000000..b0736c3
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/protobuf/plugin/doc.go
@@ -0,0 +1 @@
+package plugin
diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go
new file mode 100644
index 0000000..797b691
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.pb.go
@@ -0,0 +1,73 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/protobuf/plugin/fieldpath.proto
+// DO NOT EDIT!
+
+/*
+Package plugin is a generated protocol buffer package.
+
+It is generated from these files:
+	github.com/containerd/containerd/protobuf/plugin/fieldpath.proto
+
+It has these top-level messages:
+*/
+package plugin
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+var E_FieldpathAll = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63300,
+	Name:          "containerd.plugin.fieldpath_all",
+	Tag:           "varint,63300,opt,name=fieldpath_all,json=fieldpathAll",
+	Filename:      "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto",
+}
+
+var E_Fieldpath = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64400,
+	Name:          "containerd.plugin.fieldpath",
+	Tag:           "varint,64400,opt,name=fieldpath",
+	Filename:      "github.com/containerd/containerd/protobuf/plugin/fieldpath.proto",
+}
+
+func init() {
+	proto.RegisterExtension(E_FieldpathAll)
+	proto.RegisterExtension(E_Fieldpath)
+}
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/protobuf/plugin/fieldpath.proto", fileDescriptorFieldpath)
+}
+
+var fileDescriptorFieldpath = []byte{
+	// 203 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x72, 0x48, 0xcf, 0x2c, 0xc9,
+	0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
+	0x4a, 0x41, 0x66, 0x16, 0x14, 0xe5, 0x97, 0xe4, 0x27, 0x95, 0xa6, 0xe9, 0x17, 0xe4, 0x94, 0xa6,
+	0x67, 0xe6, 0xe9, 0xa7, 0x65, 0xa6, 0xe6, 0xa4, 0x14, 0x24, 0x96, 0x64, 0xe8, 0x81, 0x65, 0x84,
+	0x04, 0x11, 0x6a, 0xf5, 0x20, 0x4a, 0xa4, 0x14, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0x11, 0x5a,
+	0x53, 0x52, 0x8b, 0x93, 0x8b, 0x32, 0x0b, 0x4a, 0xf2, 0x8b, 0x20, 0x9a, 0xac, 0x9c, 0xb9, 0x78,
+	0xe1, 0xe6, 0xc4, 0x27, 0xe6, 0xe4, 0x08, 0xc9, 0xe8, 0x41, 0xf4, 0xe8, 0xc1, 0xf4, 0xe8, 0xb9,
+	0x65, 0xe6, 0xa4, 0xfa, 0x17, 0x94, 0x64, 0xe6, 0xe7, 0x15, 0x4b, 0x1c, 0x79, 0xc7, 0xac, 0xc0,
+	0xa8, 0xc1, 0x11, 0xc4, 0x03, 0xd7, 0xe4, 0x98, 0x93, 0x63, 0x65, 0xcf, 0xc5, 0x09, 0xe7, 0x0b,
+	0xc9, 0x63, 0x18, 0xe0, 0x9b, 0x5a, 0x5c, 0x9c, 0x98, 0x0e, 0x37, 0x63, 0xc2, 0x77, 0x88, 0x19,
+	0x08, 0x3d, 0x4e, 0x12, 0x27, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xd0, 0xf0, 0x48, 0x8e,
+	0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x04, 0x04, 0x00,
+	0x00, 0xff, 0xff, 0xd6, 0x21, 0x2a, 0xb6, 0x17, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto
new file mode 100644
index 0000000..0674dc6
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/protobuf/plugin/fieldpath.proto
@@ -0,0 +1,40 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+package containerd.plugin;
+
+import "google/protobuf/descriptor.proto";
+
+extend google.protobuf.FileOptions {
+	optional bool fieldpath_all = 63300;
+}
+
+extend google.protobuf.MessageOptions {
+	optional bool fieldpath = 64400;
+}
diff --git a/vendor/github.com/containerd/containerd/protobuf/plugin/helpers.go b/vendor/github.com/containerd/containerd/protobuf/plugin/helpers.go
new file mode 100644
index 0000000..7a2af56
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/protobuf/plugin/helpers.go
@@ -0,0 +1,11 @@
+package plugin
+
+import (
+	"github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+)
+
+// FieldpathEnabled returns true if E_Fieldpath is enabled
+func FieldpathEnabled(file *descriptor.FileDescriptorProto, message *descriptor.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Fieldpath, proto.GetBoolExtension(file.Options, E_FieldpathAll, false))
+}
diff --git a/vendor/github.com/containerd/containerd/reaper/reaper.go b/vendor/github.com/containerd/containerd/reaper/reaper.go
new file mode 100644
index 0000000..d7dfbb2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/reaper/reaper.go
@@ -0,0 +1,94 @@
+// +build !windows
+
+package reaper
+
+import (
+	"os/exec"
+	"sync"
+	"time"
+
+	"github.com/containerd/containerd/sys"
+	runc "github.com/containerd/go-runc"
+	"github.com/pkg/errors"
+)
+
+// ErrNoSuchProcess is returned when the process no longer exists
+var ErrNoSuchProcess = errors.New("no such process")
+
+const bufferSize = 2048
+
+// Reap should be called when the process receives an SIGCHLD.  Reap will reap
+// all exited processes and close their wait channels
+func Reap() error {
+	now := time.Now()
+	exits, err := sys.Reap(false)
+	Default.Lock()
+	for c := range Default.subscribers {
+		for _, e := range exits {
+			c <- runc.Exit{
+				Timestamp: now,
+				Pid:       e.Pid,
+				Status:    e.Status,
+			}
+		}
+
+	}
+	Default.Unlock()
+	return err
+}
+
+// Default is the default monitor initialized for the package
+var Default = &Monitor{
+	subscribers: make(map[chan runc.Exit]struct{}),
+}
+
+// Monitor monitors the underlying system for process status changes
+type Monitor struct {
+	sync.Mutex
+
+	subscribers map[chan runc.Exit]struct{}
+}
+
+// Start starts the command and registers the process with the reaper
+func (m *Monitor) Start(c *exec.Cmd) (chan runc.Exit, error) {
+	ec := m.Subscribe()
+	if err := c.Start(); err != nil {
+		m.Unsubscribe(ec)
+		return nil, err
+	}
+	return ec, nil
+}
+
+// Wait blocks until a process is signaled as dead.
+// User should rely on the value of the exit status to determine if the
+// command was successful or not.
+func (m *Monitor) Wait(c *exec.Cmd, ec chan runc.Exit) (int, error) {
+	for e := range ec {
+		if e.Pid == c.Process.Pid {
+			// make sure we flush all IO
+			c.Wait()
+			m.Unsubscribe(ec)
+			return e.Status, nil
+		}
+	}
+	// return no such process if the ec channel is closed and no more exit
+	// events will be sent
+	return -1, ErrNoSuchProcess
+}
+
+// Subscribe to process exit changes
+func (m *Monitor) Subscribe() chan runc.Exit {
+	c := make(chan runc.Exit, bufferSize)
+	m.Lock()
+	m.subscribers[c] = struct{}{}
+	m.Unlock()
+	return c
+}
+
+// Unsubscribe from process exit changes
+func (m *Monitor) Unsubscribe(c chan runc.Exit) {
+	m.Lock()
+	delete(m.subscribers, c)
+	close(c)
+	m.Unlock()
+}
diff --git a/vendor/github.com/containerd/containerd/reference/reference.go b/vendor/github.com/containerd/containerd/reference/reference.go
new file mode 100644
index 0000000..55c43b8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/reference/reference.go
@@ -0,0 +1,146 @@
+package reference
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"path"
+	"regexp"
+	"strings"
+
+	digest "github.com/opencontainers/go-digest"
+)
+
+var (
+	// ErrInvalid is returned when there is an invalid reference
+	ErrInvalid = errors.New("invalid reference")
+	// ErrObjectRequired is returned when the object is required
+	ErrObjectRequired = errors.New("object required")
+	// ErrHostnameRequired is returned when the hostname is required
+	ErrHostnameRequired = errors.New("hostname required")
+)
+
+// Spec defines the main components of a reference specification.
+//
+// A reference specification is a schema-less URI parsed into common
+// components. The two main components, locator and object, are required to be
+// supported by remotes. It represents a superset of the naming defined in
+// docker's reference schema. It aims to be compatible but not prescriptive.
+//
+// While the interpretation of the components, locator and object, are up to
+// the remote, we define a few common parts, accessible via helper methods.
+//
+// The first is the hostname, which is part of the locator. This doesn't need
+// to map to a physical resource, but it must parse as a hostname. We refer to
+// this as the namespace.
+//
+// The other component made accessible by helper method is the digest. This is
+// part of the object identifier, always prefixed with an '@'. If present, the
+// remote may use the digest portion directly or resolve it against a prefix.
+// If the object does not include the `@` symbol, the return value for `Digest`
+// will be empty.
+type Spec struct {
+	// Locator is the host and path portion of the specification. The host
+	// portion may refer to an actual host or just a namespace of related
+	// images.
+	//
+	// Typically, the locator may be used to resolve the remote to fetch specific
+	// resources.
+	Locator string
+
+	// Object contains the identifier for the remote resource. Classically,
+	// this is a tag but can refer to anything in a remote. By convention, any
+	// portion that may be a partial or whole digest will be preceded by an
+	// `@`. Anything preceding the `@` will be referred to as the "tag".
+	//
+	// In practice, we will see this broken down into the following formats:
+	//
+	// 1. <tag>
+	// 2. <tag>@<digest spec>
+	// 3. @<digest spec>
+	//
+	// We define the tag to be anything except '@' and ':'. <digest spec> may
+	// be a full valid digest or shortened version, possibly with elided
+	// algorithm.
+	Object string
+}
+
+var splitRe = regexp.MustCompile(`[:@]`)
+
+// Parse parses the string into a structured ref.
+func Parse(s string) (Spec, error) {
+	u, err := url.Parse("dummy://" + s)
+	if err != nil {
+		return Spec{}, err
+	}
+
+	if u.Scheme != "dummy" {
+		return Spec{}, ErrInvalid
+	}
+
+	if u.Host == "" {
+		return Spec{}, ErrHostnameRequired
+	}
+
+	var object string
+
+	if idx := splitRe.FindStringIndex(u.Path); idx != nil {
+		// This allows us to retain the @ to signify digests or shortened digests in
+		// the object.
+		object = u.Path[idx[0]:]
+		if object[:1] == ":" {
+			object = object[1:]
+		}
+		u.Path = u.Path[:idx[0]]
+	}
+
+	return Spec{
+		Locator: path.Join(u.Host, u.Path),
+		Object:  object,
+	}, nil
+}
+
+// Hostname returns the hostname portion of the locator.
+//
+// Remotes are not required to directly access the resources at this host. This
+// method is provided for convenience.
+func (r Spec) Hostname() string {
+	i := strings.Index(r.Locator, "/")
+
+	if i < 0 {
+		i = len(r.Locator) + 1
+	}
+	return r.Locator[:i]
+}
+
+// Digest returns the digest portion of the reference spec. This may be a
+// partial or invalid digest, which may be used to lookup a complete digest.
+func (r Spec) Digest() digest.Digest {
+	_, dgst := SplitObject(r.Object)
+	return dgst
+}
+
+// String returns the normalized string for the ref.
+func (r Spec) String() string {
+	if r.Object == "" {
+		return r.Locator
+	}
+	if r.Object[:1] == "@" {
+		return fmt.Sprintf("%v%v", r.Locator, r.Object)
+	}
+
+	return fmt.Sprintf("%v:%v", r.Locator, r.Object)
+}
+
+// SplitObject provides two parts of the object spec, delimited by an `@`
+// symbol.
+//
+// Either may be empty and it is the callers job to validate them
+// appropriately.
+func SplitObject(obj string) (tag string, dgst digest.Digest) {
+	parts := strings.SplitAfterN(obj, "@", 2)
+	if len(parts) < 2 {
+		return parts[0], ""
+	}
+	return parts[0], digest.Digest(parts[1])
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth.go b/vendor/github.com/containerd/containerd/remotes/docker/auth.go
new file mode 100644
index 0000000..aa33752
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/auth.go
@@ -0,0 +1,182 @@
+package docker
+
+import (
+	"net/http"
+	"sort"
+	"strings"
+)
+
+type authenticationScheme byte
+
+const (
+	basicAuth  authenticationScheme = 1 << iota // Defined in RFC 7617
+	digestAuth                                  // Defined in RFC 7616
+	bearerAuth                                  // Defined in RFC 6750
+)
+
+// challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type challenge struct {
+	// scheme is the auth-scheme according to RFC 2617
+	scheme authenticationScheme
+
+	// parameters are the auth-params according to RFC 2617
+	parameters map[string]string
+}
+
+type byScheme []challenge
+
+func (bs byScheme) Len() int      { return len(bs) }
+func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
+
+// Sort in priority order: token > digest > basic
+func (bs byScheme) Less(i, j int) bool { return bs[i].scheme > bs[j].scheme }
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+func parseAuthHeader(header http.Header) []challenge {
+	challenges := []challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		var s authenticationScheme
+		switch v {
+		case "basic":
+			s = basicAuth
+		case "digest":
+			s = digestAuth
+		case "bearer":
+			s = bearerAuth
+		default:
+			continue
+		}
+		challenges = append(challenges, challenge{scheme: s, parameters: p})
+	}
+	sort.Stable(byScheme(challenges))
+	return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	for {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+		if !strings.HasPrefix(s, ",") {
+			return
+		}
+		s = s[1:]
+	}
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
new file mode 100644
index 0000000..46677e4
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go
@@ -0,0 +1,93 @@
+package docker
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"path"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+type dockerFetcher struct {
+	*dockerBase
+}
+
+func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
+	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(
+		logrus.Fields{
+			"base":   r.base.String(),
+			"digest": desc.Digest,
+		},
+	))
+
+	urls, err := r.getV2URLPaths(ctx, desc)
+	if err != nil {
+		return nil, err
+	}
+
+	ctx, err = contextWithRepositoryScope(ctx, r.refspec, false)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, u := range urls {
+		req, err := http.NewRequest(http.MethodGet, u, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", "))
+		resp, err := r.doRequestWithRetries(ctx, req, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		if resp.StatusCode > 299 {
+			resp.Body.Close()
+			if resp.StatusCode == http.StatusNotFound {
+				continue // try one of the other urls.
+			}
+			return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
+		}
+
+		return resp.Body, nil
+	}
+
+	return nil, errors.Wrapf(errdefs.ErrNotFound,
+		"could not fetch content descriptor %v (%v) from remote",
+		desc.Digest, desc.MediaType)
+}
+
+// getV2URLPaths generates the candidate urls paths for the object based on the
+// set of hints and the provided object id. URLs are returned in the order of
+// most to least likely succeed.
+func (r *dockerFetcher) getV2URLPaths(ctx context.Context, desc ocispec.Descriptor) ([]string, error) {
+	var urls []string
+
+	if len(desc.URLs) > 0 {
+		// handle fetch via external urls.
+		for _, u := range desc.URLs {
+			log.G(ctx).WithField("url", u).Debug("adding alternative url")
+			urls = append(urls, u)
+		}
+	}
+
+	switch desc.MediaType {
+	case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
+		images.MediaTypeDockerSchema1Manifest,
+		ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
+		urls = append(urls, r.url(path.Join("manifests", desc.Digest.String())))
+	}
+
+	// always fallback to attempting to get the object out of the blobs store.
+	urls = append(urls, r.url(path.Join("blobs", desc.Digest.String())))
+
+	return urls, nil
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
new file mode 100644
index 0000000..24bd278
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go
@@ -0,0 +1,282 @@
+package docker
+
+import (
+	"context"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/remotes"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+type dockerPusher struct {
+	*dockerBase
+	tag string
+
+	// TODO: namespace tracker
+	tracker StatusTracker
+}
+
+func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
+	ctx, err := contextWithRepositoryScope(ctx, p.refspec, true)
+	if err != nil {
+		return nil, err
+	}
+	ref := remotes.MakeRefKey(ctx, desc)
+	status, err := p.tracker.GetStatus(ref)
+	if err == nil {
+		if status.Offset == status.Total {
+			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "ref %v already exists", ref)
+		}
+		// TODO: Handle incomplete status
+	} else if !errdefs.IsNotFound(err) {
+		return nil, errors.Wrap(err, "failed to get status")
+	}
+
+	var (
+		isManifest bool
+		existCheck string
+	)
+
+	switch desc.MediaType {
+	case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
+		ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
+		isManifest = true
+		existCheck = path.Join("manifests", desc.Digest.String())
+	default:
+		existCheck = path.Join("blobs", desc.Digest.String())
+	}
+
+	req, err := http.NewRequest(http.MethodHead, p.url(existCheck), nil)
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", "))
+	resp, err := p.doRequestWithRetries(ctx, req, nil)
+	if err != nil {
+		if errors.Cause(err) != ErrInvalidAuthorization {
+			return nil, err
+		}
+		log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push")
+	} else {
+		if resp.StatusCode == http.StatusOK {
+			p.tracker.SetStatus(ref, Status{
+				Status: content.Status{
+					Ref: ref,
+					// TODO: Set updated time?
+				},
+			})
+			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest)
+		}
+		if resp.StatusCode != http.StatusNotFound {
+			// TODO: log error
+			return nil, errors.Errorf("unexpected response: %s", resp.Status)
+		}
+	}
+
+	// TODO: Lookup related objects for cross repository push
+
+	if isManifest {
+		var putPath string
+		if p.tag != "" {
+			putPath = path.Join("manifests", p.tag)
+		} else {
+			putPath = path.Join("manifests", desc.Digest.String())
+		}
+
+		req, err = http.NewRequest(http.MethodPut, p.url(putPath), nil)
+		if err != nil {
+			return nil, err
+		}
+		req.Header.Add("Content-Type", desc.MediaType)
+	} else {
+		// TODO: Do monolithic upload if size is small
+
+		// Start upload request
+		req, err = http.NewRequest(http.MethodPost, p.url("blobs", "uploads")+"/", nil)
+		if err != nil {
+			return nil, err
+		}
+
+		resp, err := p.doRequestWithRetries(ctx, req, nil)
+		if err != nil {
+			return nil, err
+		}
+
+		switch resp.StatusCode {
+		case http.StatusOK, http.StatusAccepted, http.StatusNoContent:
+		default:
+			// TODO: log error
+			return nil, errors.Errorf("unexpected response: %s", resp.Status)
+		}
+
+		location := resp.Header.Get("Location")
+		// Support paths without host in location
+		if strings.HasPrefix(location, "/") {
+			u := p.base
+			u.Path = location
+			location = u.String()
+		}
+
+		req, err = http.NewRequest(http.MethodPut, location, nil)
+		if err != nil {
+			return nil, err
+		}
+		q := req.URL.Query()
+		q.Add("digest", desc.Digest.String())
+		req.URL.RawQuery = q.Encode()
+
+	}
+	p.tracker.SetStatus(ref, Status{
+		Status: content.Status{
+			Ref:       ref,
+			Total:     desc.Size,
+			Expected:  desc.Digest,
+			StartedAt: time.Now(),
+		},
+	})
+
+	// TODO: Support chunked upload
+
+	pr, pw := io.Pipe()
+	respC := make(chan *http.Response, 1)
+
+	req.Body = ioutil.NopCloser(pr)
+	req.ContentLength = desc.Size
+
+	go func() {
+		defer close(respC)
+		resp, err = p.doRequest(ctx, req)
+		if err != nil {
+			pr.CloseWithError(err)
+			return
+		}
+
+		switch resp.StatusCode {
+		case http.StatusOK, http.StatusCreated, http.StatusNoContent:
+		default:
+			// TODO: log error
+			pr.CloseWithError(errors.Errorf("unexpected response: %s", resp.Status))
+		}
+		respC <- resp
+	}()
+
+	return &pushWriter{
+		base:       p.dockerBase,
+		ref:        ref,
+		pipe:       pw,
+		responseC:  respC,
+		isManifest: isManifest,
+		expected:   desc.Digest,
+		tracker:    p.tracker,
+	}, nil
+}
+
+type pushWriter struct {
+	base *dockerBase
+	ref  string
+
+	pipe       *io.PipeWriter
+	responseC  <-chan *http.Response
+	isManifest bool
+
+	expected digest.Digest
+	tracker  StatusTracker
+}
+
+func (pw *pushWriter) Write(p []byte) (n int, err error) {
+	status, err := pw.tracker.GetStatus(pw.ref)
+	if err != nil {
+		return n, err
+	}
+	n, err = pw.pipe.Write(p)
+	status.Offset += int64(n)
+	status.UpdatedAt = time.Now()
+	pw.tracker.SetStatus(pw.ref, status)
+	return
+}
+
+func (pw *pushWriter) Close() error {
+	return pw.pipe.Close()
+}
+
+func (pw *pushWriter) Status() (content.Status, error) {
+	status, err := pw.tracker.GetStatus(pw.ref)
+	if err != nil {
+		return content.Status{}, err
+	}
+	return status.Status, nil
+
+}
+
+func (pw *pushWriter) Digest() digest.Digest {
+	// TODO: Get rid of this function?
+	return pw.expected
+}
+
+func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	// Check whether read has already thrown an error
+	if _, err := pw.pipe.Write([]byte{}); err != nil && err != io.ErrClosedPipe {
+		return errors.Wrap(err, "pipe error before commit")
+	}
+
+	if err := pw.pipe.Close(); err != nil {
+		return err
+	}
+	// TODO: Update status to determine committing
+
+	// TODO: timeout waiting for response
+	resp := <-pw.responseC
+	if resp == nil {
+		return errors.New("no response")
+	}
+
+	// 201 is specified return status, some registries return
+	// 200 or 204.
+	switch resp.StatusCode {
+	case http.StatusOK, http.StatusCreated, http.StatusNoContent:
+	default:
+		return errors.Errorf("unexpected status: %s", resp.Status)
+	}
+
+	status, err := pw.tracker.GetStatus(pw.ref)
+	if err != nil {
+		return errors.Wrap(err, "failed to get status")
+	}
+
+	if size > 0 && size != status.Offset {
+		return errors.Errorf("unxpected size %d, expected %d", status.Offset, size)
+	}
+
+	if expected == "" {
+		expected = status.Expected
+	}
+
+	actual, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
+	if err != nil {
+		return errors.Wrap(err, "invalid content digest in response")
+	}
+
+	if actual != expected {
+		return errors.Errorf("got digest %s, expected %s", actual, expected)
+	}
+
+	return nil
+}
+
+func (pw *pushWriter) Truncate(size int64) error {
+	// TODO: if blob close request and start new request at offset
+	// TODO: always error on manifest
+	return errors.New("cannot truncate remote upload")
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
new file mode 100644
index 0000000..7a11504
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go
@@ -0,0 +1,582 @@
+package docker
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"path"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/reference"
+	"github.com/containerd/containerd/remotes"
+	digest "github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/net/context/ctxhttp"
+)
+
+var (
+	// ErrNoToken is returned if a request is successful but the body does not
+	// contain an authorization token.
+	ErrNoToken = errors.New("authorization server did not include a token in the response")
+
+	// ErrInvalidAuthorization is used when credentials are passed to a server but
+	// those credentials are rejected.
+	ErrInvalidAuthorization = errors.New("authorization failed")
+)
+
+// dockerResolver implements remotes.Resolver against a Docker v2 registry.
+type dockerResolver struct {
+	credentials func(string) (string, string, error)
+	plainHTTP   bool
+	client      *http.Client
+	tracker     StatusTracker
+}
+
+// ResolverOptions are used to configure a new Docker register resolver
+type ResolverOptions struct {
+	// Credentials provides username and secret given a host.
+	// If username is empty but a secret is given, that secret
+	// is interpreted as a long lived token.
+	Credentials func(string) (string, string, error)
+
+	// PlainHTTP specifies to use plain http and not https
+	PlainHTTP bool
+
+	// Client is the http client to use when making registry requests
+	Client *http.Client
+
+	// Tracker is used to track uploads to the registry. This is used
+	// since the registry does not have upload tracking and the existing
+	// mechanism for getting blob upload status is expensive.
+	Tracker StatusTracker
+}
+
+// NewResolver returns a new resolver to a Docker registry
+func NewResolver(options ResolverOptions) remotes.Resolver {
+	tracker := options.Tracker
+	if tracker == nil {
+		tracker = NewInMemoryTracker()
+	}
+	return &dockerResolver{
+		credentials: options.Credentials,
+		plainHTTP:   options.PlainHTTP,
+		client:      options.Client,
+		tracker:     tracker,
+	}
+}
+
+var _ remotes.Resolver = &dockerResolver{}
+
+// Resolve resolves ref to a name and descriptor by issuing HEAD requests to
+// the registry's manifests endpoint (and, for digest references, falling back
+// to the blobs endpoint on 404). The descriptor is assembled from the
+// Docker-Content-Digest, Content-Length and Content-Type response headers.
+func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) {
+	refspec, err := reference.Parse(ref)
+	if err != nil {
+		return "", ocispec.Descriptor{}, err
+	}
+
+	// A tag or digest is required; a bare locator cannot be resolved.
+	if refspec.Object == "" {
+		return "", ocispec.Descriptor{}, reference.ErrObjectRequired
+	}
+
+	base, err := r.base(refspec)
+	if err != nil {
+		return "", ocispec.Descriptor{}, err
+	}
+
+	fetcher := dockerFetcher{
+		dockerBase: base,
+	}
+
+	var (
+		urls []string
+		dgst = refspec.Digest()
+	)
+
+	if dgst != "" {
+		if err := dgst.Validate(); err != nil {
+			// need to fail here, since we can't actually resolve the invalid
+			// digest.
+			return "", ocispec.Descriptor{}, err
+		}
+
+		// turns out, we have a valid digest, make a url.
+		urls = append(urls, fetcher.url("manifests", dgst.String()))
+
+		// fallback to blobs on not found.
+		urls = append(urls, fetcher.url("blobs", dgst.String()))
+	} else {
+		urls = append(urls, fetcher.url("manifests", refspec.Object))
+	}
+
+	ctx, err = contextWithRepositoryScope(ctx, refspec, false)
+	if err != nil {
+		return "", ocispec.Descriptor{}, err
+	}
+	// Try each candidate URL in order; the first non-404 success wins.
+	for _, u := range urls {
+		req, err := http.NewRequest(http.MethodHead, u, nil)
+		if err != nil {
+			return "", ocispec.Descriptor{}, err
+		}
+
+		// set headers for all the types we support for resolution.
+		req.Header.Set("Accept", strings.Join([]string{
+			images.MediaTypeDockerSchema2Manifest,
+			images.MediaTypeDockerSchema2ManifestList,
+			ocispec.MediaTypeImageManifest,
+			ocispec.MediaTypeImageIndex, "*"}, ", "))
+
+		log.G(ctx).Debug("resolving")
+		resp, err := fetcher.doRequestWithRetries(ctx, req, nil)
+		if err != nil {
+			return "", ocispec.Descriptor{}, err
+		}
+		resp.Body.Close() // don't care about body contents.
+
+		if resp.StatusCode > 299 {
+			if resp.StatusCode == http.StatusNotFound {
+				continue
+			}
+			return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
+		}
+
+		// this is the only point at which we trust the registry. we use the
+		// content headers to assemble a descriptor for the name. when this becomes
+		// more robust, we mostly get this information from a secure trust store.
+		dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
+
+		if dgstHeader != "" {
+			if err := dgstHeader.Validate(); err != nil {
+				return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader)
+			}
+			dgst = dgstHeader
+		}
+
+		if dgst == "" {
+			return "", ocispec.Descriptor{}, errors.Errorf("could not resolve digest for %v", ref)
+		}
+
+		var (
+			size       int64
+			sizeHeader = resp.Header.Get("Content-Length")
+		)
+
+		size, err = strconv.ParseInt(sizeHeader, 10, 64)
+		if err != nil {
+
+			return "", ocispec.Descriptor{}, errors.Wrapf(err, "invalid size header: %q", sizeHeader)
+		}
+		if size < 0 {
+			return "", ocispec.Descriptor{}, errors.Errorf("%q in header not a valid size", sizeHeader)
+		}
+
+		desc := ocispec.Descriptor{
+			Digest:    dgst,
+			MediaType: resp.Header.Get("Content-Type"), // need to strip disposition?
+			Size:      size,
+		}
+
+		log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved")
+		return ref, desc, nil
+	}
+
+	return "", ocispec.Descriptor{}, errors.Errorf("%v not found", ref)
+}
+
+// Fetcher returns a fetcher for the repository named by ref.
+func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
+	refspec, err := reference.Parse(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	base, err := r.base(refspec)
+	if err != nil {
+		return nil, err
+	}
+
+	return dockerFetcher{
+		dockerBase: base,
+	}, nil
+}
+
+// Pusher returns a pusher for the repository named by ref. The ref may carry
+// a tag (used to tag pushed manifests) but not a digest.
+func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
+	refspec, err := reference.Parse(ref)
+	if err != nil {
+		return nil, err
+	}
+
+	// Manifests can be pushed by digest like any other object, but the passed in
+	// reference cannot take a digest without the associated content. A tag is allowed
+	// and will be used to tag pushed manifests.
+	if refspec.Object != "" && strings.Contains(refspec.Object, "@") {
+		return nil, errors.New("cannot use digest reference for push locator")
+	}
+
+	base, err := r.base(refspec)
+	if err != nil {
+		return nil, err
+	}
+
+	return dockerPusher{
+		dockerBase: base,
+		tag:        refspec.Object,
+		tracker:    r.tracker,
+	}, nil
+}
+
+// dockerBase carries per-repository request state shared by the fetcher and
+// pusher: the parsed reference, the registry base URL, and auth material
+// (bearer token or basic credentials) discovered while retrying requests.
+type dockerBase struct {
+	refspec reference.Spec
+	base    url.URL
+	token   string
+
+	client   *http.Client
+	useBasic bool
+	username string
+	secret   string
+}
+
+// base builds a dockerBase for refspec: it maps docker.io to
+// registry-1.docker.io, chooses http for plainHTTP or localhost hosts,
+// looks up credentials for the host, and sets the /v2/<repository> path.
+func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) {
+	var (
+		err              error
+		base             url.URL
+		username, secret string
+	)
+
+	host := refspec.Hostname()
+	base.Scheme = "https"
+
+	if host == "docker.io" {
+		base.Host = "registry-1.docker.io"
+	} else {
+		base.Host = host
+
+		if r.plainHTTP || strings.HasPrefix(host, "localhost:") {
+			base.Scheme = "http"
+		}
+	}
+
+	if r.credentials != nil {
+		username, secret, err = r.credentials(base.Host)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	prefix := strings.TrimPrefix(refspec.Locator, host+"/")
+	base.Path = path.Join("/v2", prefix)
+
+	return &dockerBase{
+		refspec:  refspec,
+		base:     base,
+		client:   r.client,
+		username: username,
+		secret:   secret,
+	}, nil
+}
+
+// url joins the given path segments onto the repository base URL.
+func (r *dockerBase) url(ps ...string) string {
+	url := r.base
+	url.Path = path.Join(url.Path, path.Join(ps...))
+	return url.String()
+}
+
+// authorize adds the current auth material to req: basic auth if useBasic is
+// set, otherwise a bearer token if one has been fetched.
+func (r *dockerBase) authorize(req *http.Request) {
+	if r.useBasic {
+		req.SetBasicAuth(r.username, r.secret)
+	} else if r.token != "" {
+		req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", r.token))
+	}
+}
+
+// doRequest authorizes and performs a single HTTP request, logging the
+// request and response at debug level.
+func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
+	ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String()))
+	log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("Do request")
+	r.authorize(req)
+	resp, err := ctxhttp.Do(ctx, r.client, req)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to do request")
+	}
+	log.G(ctx).WithFields(logrus.Fields{
+		"status":           resp.Status,
+		"response.headers": resp.Header,
+	}).Debug("fetch response received")
+	return resp, nil
+}
+
+// doRequestWithRetries performs req and recursively retries it when
+// retryRequest decides a retry is warranted (e.g. after acquiring auth).
+// responses accumulates prior responses so retry decisions can inspect
+// history; retried responses have their bodies closed here.
+func (r *dockerBase) doRequestWithRetries(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Response, error) {
+	resp, err := r.doRequest(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	responses = append(responses, resp)
+	req, err = r.retryRequest(ctx, req, responses)
+	if err != nil {
+		resp.Body.Close()
+		return nil, err
+	}
+	if req != nil {
+		resp.Body.Close()
+		return r.doRequestWithRetries(ctx, req, responses)
+	}
+	return resp, err
+}
+
+// retryRequest inspects the most recent response and returns a copy of req to
+// retry, or nil to stop. It handles 401 by resolving the auth challenge
+// (bearer token or basic auth) and 405-on-HEAD for manifests by switching to
+// GET. At most 5 responses are attempted.
+func (r *dockerBase) retryRequest(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Request, error) {
+	if len(responses) > 5 {
+		return nil, nil
+	}
+	last := responses[len(responses)-1]
+	if last.StatusCode == http.StatusUnauthorized {
+		log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized")
+		for _, c := range parseAuthHeader(last.Header) {
+			if c.scheme == bearerAuth {
+				if err := invalidAuthorization(c, responses); err != nil {
+					// Clear the stale token so it is not reused.
+					r.token = ""
+					return nil, err
+				}
+				if err := r.setTokenAuth(ctx, c.parameters); err != nil {
+					return nil, err
+				}
+				return copyRequest(req)
+			} else if c.scheme == basicAuth {
+				if r.username != "" && r.secret != "" {
+					r.useBasic = true
+				}
+				return copyRequest(req)
+			}
+		}
+		return nil, nil
+	} else if last.StatusCode == http.StatusMethodNotAllowed && req.Method == http.MethodHead {
+		// Support registries which have not properly implemented the HEAD method for
+		// manifests endpoint
+		if strings.Contains(req.URL.Path, "/manifests/") {
+			// TODO: copy request?
+			req.Method = http.MethodGet
+			return copyRequest(req)
+		}
+	}
+
+	// TODO: Handle 50x errors accounting for attempt history
+	return nil, nil
+}
+
+// invalidAuthorization returns ErrInvalidAuthorization (wrapped with the
+// server's error message) when the challenge reports an auth error AND the
+// previous attempt was the same request — i.e. the credentials we just sent
+// were rejected. A first failure, or one for a different request, is not
+// treated as invalid credentials.
+func invalidAuthorization(c challenge, responses []*http.Response) error {
+	errStr := c.parameters["error"]
+	if errStr == "" {
+		return nil
+	}
+
+	n := len(responses)
+	if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) {
+		return nil
+	}
+
+	return errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr)
+}
+
+// sameRequest reports whether two requests have the same method and URL.
+func sameRequest(r1, r2 *http.Request) bool {
+	if r1.Method != r2.Method {
+		return false
+	}
+	if *r1.URL != *r2.URL {
+		return false
+	}
+	return true
+}
+
+// copyRequest shallow-copies req, rewinding the body via GetBody when
+// available so the copy can be re-sent.
+func copyRequest(req *http.Request) (*http.Request, error) {
+	ireq := *req
+	if ireq.GetBody != nil {
+		var err error
+		ireq.Body, err = ireq.GetBody()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return &ireq, nil
+}
+
+// isManifestAccept reports whether the Accept header requests any of the
+// supported manifest or index media types.
+func isManifestAccept(h http.Header) bool {
+	for _, ah := range h[textproto.CanonicalMIMEHeaderKey("Accept")] {
+		switch ah {
+		case images.MediaTypeDockerSchema2Manifest:
+			fallthrough
+		case images.MediaTypeDockerSchema2ManifestList:
+			fallthrough
+		case ocispec.MediaTypeImageManifest:
+			fallthrough
+		case ocispec.MediaTypeImageIndex:
+			return true
+		}
+	}
+	return false
+}
+
+// setTokenAuth resolves a bearer-token challenge: it validates the realm and
+// scopes from the challenge parameters and stores a token on r, using the
+// OAuth POST endpoint when a secret is available and an anonymous GET
+// otherwise.
+func (r *dockerBase) setTokenAuth(ctx context.Context, params map[string]string) error {
+	realm, ok := params["realm"]
+	if !ok {
+		return errors.New("no realm specified for token auth challenge")
+	}
+
+	realmURL, err := url.Parse(realm)
+	if err != nil {
+		return fmt.Errorf("invalid token auth challenge realm: %s", err)
+	}
+
+	to := tokenOptions{
+		realm:   realmURL.String(),
+		service: params["service"],
+	}
+
+	to.scopes = getTokenScopes(ctx, params)
+	if len(to.scopes) == 0 {
+		return errors.Errorf("no scope specified for token auth challenge")
+	}
+	if r.secret != "" {
+		// Credential information is provided, use oauth POST endpoint
+		r.token, err = r.fetchTokenWithOAuth(ctx, to)
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch oauth token")
+		}
+	} else {
+		// Do request anonymously
+		r.token, err = r.getToken(ctx, to)
+		if err != nil {
+			return errors.Wrap(err, "failed to fetch anonymous token")
+		}
+	}
+
+	return nil
+}
+
+// tokenOptions holds the parameters of a token request derived from an
+// auth challenge.
+type tokenOptions struct {
+	realm   string
+	service string
+	scopes  []string
+}
+
+// postTokenResponse is the body returned by the OAuth POST token endpoint.
+type postTokenResponse struct {
+	AccessToken  string    `json:"access_token"`
+	RefreshToken string    `json:"refresh_token"`
+	ExpiresIn    int       `json:"expires_in"`
+	IssuedAt     time.Time `json:"issued_at"`
+	Scope        string    `json:"scope"`
+}
+
+// fetchTokenWithOAuth requests a token from the realm's OAuth POST endpoint,
+// using a refresh_token grant when no username is set and a password grant
+// otherwise. It falls back to the GET endpoint for registries that do not
+// support POST.
+func (r *dockerBase) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) {
+	form := url.Values{}
+	form.Set("scope", strings.Join(to.scopes, " "))
+	form.Set("service", to.service)
+	// TODO: Allow setting client_id
+	form.Set("client_id", "containerd-dist-tool")
+
+	if r.username == "" {
+		form.Set("grant_type", "refresh_token")
+		form.Set("refresh_token", r.secret)
+	} else {
+		form.Set("grant_type", "password")
+		form.Set("username", r.username)
+		form.Set("password", r.secret)
+	}
+
+	resp, err := ctxhttp.PostForm(ctx, r.client, to.realm, form)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	// Registries without support for POST may return 404 for POST /v2/token.
+	// As of September 2017, GCR is known to return 404.
+	if (resp.StatusCode == 405 && r.username != "") || resp.StatusCode == 404 {
+		return r.getToken(ctx, to)
+	} else if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+		b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB
+		log.G(ctx).WithFields(logrus.Fields{
+			"status": resp.Status,
+			"body":   string(b),
+		}).Debugf("token request failed")
+		// TODO: handle error body and write debug output
+		return "", errors.Errorf("unexpected status: %s", resp.Status)
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+
+	var tr postTokenResponse
+	if err = decoder.Decode(&tr); err != nil {
+		return "", fmt.Errorf("unable to decode token response: %s", err)
+	}
+
+	return tr.AccessToken, nil
+}
+
+// getTokenResponse is the body returned by the GET token endpoint.
+type getTokenResponse struct {
+	Token        string    `json:"token"`
+	AccessToken  string    `json:"access_token"`
+	ExpiresIn    int       `json:"expires_in"`
+	IssuedAt     time.Time `json:"issued_at"`
+	RefreshToken string    `json:"refresh_token"`
+}
+
+// getToken fetches a token using a GET request against the realm, passing
+// service and scopes as query parameters and basic auth when a secret is set.
+// It returns ErrNoToken when the response contains neither token field.
+func (r *dockerBase) getToken(ctx context.Context, to tokenOptions) (string, error) {
+	req, err := http.NewRequest("GET", to.realm, nil)
+	if err != nil {
+		return "", err
+	}
+
+	reqParams := req.URL.Query()
+
+	if to.service != "" {
+		reqParams.Add("service", to.service)
+	}
+
+	for _, scope := range to.scopes {
+		reqParams.Add("scope", scope)
+	}
+
+	if r.secret != "" {
+		req.SetBasicAuth(r.username, r.secret)
+	}
+
+	req.URL.RawQuery = reqParams.Encode()
+
+	resp, err := ctxhttp.Do(ctx, r.client, req)
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
+		// TODO: handle error body and write debug output
+		return "", errors.Errorf("unexpected status: %s", resp.Status)
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+
+	var tr getTokenResponse
+	if err = decoder.Decode(&tr); err != nil {
+		return "", fmt.Errorf("unable to decode token response: %s", err)
+	}
+
+	// `access_token` is equivalent to `token` and if both are specified
+	// the choice is undefined.  Canonicalize `access_token` by sticking
+	// things in `token`.
+	if tr.AccessToken != "" {
+		tr.Token = tr.AccessToken
+	}
+
+	if tr.Token == "" {
+		return "", ErrNoToken
+	}
+
+	return tr.Token, nil
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go
new file mode 100644
index 0000000..99940c8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go
@@ -0,0 +1,475 @@
+package schema1
+
+import (
+	"bytes"
+	"compress/gzip"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"strings"
+	"sync"
+	"time"
+
+	"golang.org/x/sync/errgroup"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/remotes"
+	digest "github.com/opencontainers/go-digest"
+	specs "github.com/opencontainers/image-spec/specs-go"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+const manifestSizeLimit = 8e6 // 8MB
+
+var (
+	mediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json"
+)
+
+type blobState struct {
+	diffID digest.Digest
+	empty  bool
+}
+
+// Converter converts schema1 manifests to schema2 on fetch
+type Converter struct {
+	contentStore content.Store
+	fetcher      remotes.Fetcher
+
+	pulledManifest *manifest
+
+	mu         sync.Mutex
+	blobMap    map[digest.Digest]blobState
+	layerBlobs map[digest.Digest]ocispec.Descriptor
+}
+
+// NewConverter returns a new converter
+func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) *Converter {
+	return &Converter{
+		contentStore: contentStore,
+		fetcher:      fetcher,
+		blobMap:      map[digest.Digest]blobState{},
+		layerBlobs:   map[digest.Digest]ocispec.Descriptor{},
+	}
+}
+
+// Handle fetches descriptors for a docker media type: a schema 1 manifest is
+// fetched and parsed, returning descriptors for its non-empty layer blobs;
+// a layer blob is fetched and recorded for later conversion.
+func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+	switch desc.MediaType {
+	case images.MediaTypeDockerSchema1Manifest:
+		if err := c.fetchManifest(ctx, desc); err != nil {
+			return nil, err
+		}
+
+		m := c.pulledManifest
+		if len(m.FSLayers) != len(m.History) {
+			return nil, errors.New("invalid schema 1 manifest, history and layer mismatch")
+		}
+		descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers))
+
+		for i := range m.FSLayers {
+			if _, ok := c.blobMap[c.pulledManifest.FSLayers[i].BlobSum]; !ok {
+				empty, err := isEmptyLayer([]byte(m.History[i].V1Compatibility))
+				if err != nil {
+					return nil, err
+				}
+
+				// Do not attempt to download a known empty blob
+				if !empty {
+					// Prepend so descs ends up base-to-top ordered.
+					descs = append([]ocispec.Descriptor{
+						{
+							MediaType: images.MediaTypeDockerSchema2LayerGzip,
+							Digest:    c.pulledManifest.FSLayers[i].BlobSum,
+						},
+					}, descs...)
+				}
+				c.blobMap[c.pulledManifest.FSLayers[i].BlobSum] = blobState{
+					empty: empty,
+				}
+			}
+		}
+		return descs, nil
+	case images.MediaTypeDockerSchema2LayerGzip:
+		if c.pulledManifest == nil {
+			return nil, errors.New("manifest required for schema 1 blob pull")
+		}
+		return nil, c.fetchBlob(ctx, desc)
+	default:
+		return nil, fmt.Errorf("%v not support for schema 1 manifests", desc.MediaType)
+	}
+}
+
+// Convert assembles the schema 2 / OCI manifest from the previously pulled
+// schema 1 manifest and fetched layer blobs: it builds the image config from
+// the v1 history, writes config and manifest to the content store with GC
+// labels, and returns the manifest descriptor.
+func (c *Converter) Convert(ctx context.Context) (ocispec.Descriptor, error) {
+	history, diffIDs, err := c.schema1ManifestHistory()
+	if err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "schema 1 conversion failed")
+	}
+
+	var img ocispec.Image
+	if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal image from schema 1 history")
+	}
+
+	img.History = history
+	img.RootFS = ocispec.RootFS{
+		Type:    "layers",
+		DiffIDs: diffIDs,
+	}
+
+	b, err := json.Marshal(img)
+	if err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image")
+	}
+
+	config := ocispec.Descriptor{
+		MediaType: ocispec.MediaTypeImageConfig,
+		Digest:    digest.Canonical.FromBytes(b),
+		Size:      int64(len(b)),
+	}
+
+	// Layers are looked up by diff ID, in the order established by history.
+	layers := make([]ocispec.Descriptor, len(diffIDs))
+	for i, diffID := range diffIDs {
+		layers[i] = c.layerBlobs[diffID]
+	}
+
+	manifest := ocispec.Manifest{
+		Versioned: specs.Versioned{
+			SchemaVersion: 2,
+		},
+		Config: config,
+		Layers: layers,
+	}
+
+	mb, err := json.Marshal(manifest)
+	if err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image")
+	}
+
+	desc := ocispec.Descriptor{
+		MediaType: ocispec.MediaTypeImageManifest,
+		Digest:    digest.Canonical.FromBytes(mb),
+		Size:      int64(len(mb)),
+	}
+
+	// Root the manifest for GC and reference the config and each layer.
+	labels := map[string]string{}
+	labels["containerd.io/gc.root"] = time.Now().UTC().Format(time.RFC3339)
+	labels["containerd.io/gc.ref.content.0"] = manifest.Config.Digest.String()
+	for i, ch := range manifest.Layers {
+		labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = ch.Digest.String()
+	}
+
+	ref := remotes.MakeRefKey(ctx, desc)
+	if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc.Size, desc.Digest, content.WithLabels(labels)); err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config")
+	}
+
+	ref = remotes.MakeRefKey(ctx, config)
+	if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config.Size, config.Digest); err != nil {
+		return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config")
+	}
+
+	// The layers are now referenced by the manifest; drop their root tags.
+	for _, ch := range manifest.Layers {
+		if _, err := c.contentStore.Update(ctx, content.Info{Digest: ch.Digest}, "labels.containerd.io/gc.root"); err != nil {
+			return ocispec.Descriptor{}, errors.Wrap(err, "failed to remove blob root tag")
+		}
+	}
+
+	return desc, nil
+}
+
+// fetchManifest downloads the schema 1 manifest for desc (capped at
+// manifestSizeLimit), strips its JWS signature, and stores the parsed
+// result on c.pulledManifest.
+func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error {
+	log.G(ctx).Debug("fetch schema 1")
+
+	rc, err := c.fetcher.Fetch(ctx, desc)
+	if err != nil {
+		return err
+	}
+
+	b, err := ioutil.ReadAll(io.LimitReader(rc, manifestSizeLimit)) // limit to 8MB
+	rc.Close()
+	if err != nil {
+		return err
+	}
+
+	b, err = stripSignature(b)
+	if err != nil {
+		return err
+	}
+
+	var m manifest
+	if err := json.Unmarshal(b, &m); err != nil {
+		return err
+	}
+	c.pulledManifest = &m
+
+	return nil
+}
+
+// fetchBlob fetches a gzipped layer blob into the content store while
+// simultaneously computing its uncompressed diff ID and emptiness via a tee
+// into blobStateCalculator. If the blob already exists, it is read back from
+// the store instead of being fetched. The resulting blob state is recorded
+// in c.blobMap and c.layerBlobs.
+func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error {
+	log.G(ctx).Debug("fetch blob")
+
+	ref := remotes.MakeRefKey(ctx, desc)
+
+	calc := newBlobStateCalculator()
+
+	cw, err := c.contentStore.Writer(ctx, ref, desc.Size, desc.Digest)
+	if err != nil {
+		if !errdefs.IsAlreadyExists(err) {
+			return err
+		}
+
+		// TODO: Check if blob -> diff id mapping already exists
+		// TODO: Check if blob empty label exists
+
+		// Blob already present: recompute its state from the stored content.
+		ra, err := c.contentStore.ReaderAt(ctx, desc.Digest)
+		if err != nil {
+			return err
+		}
+		defer ra.Close()
+
+		gr, err := gzip.NewReader(content.NewReader(ra))
+		if err != nil {
+			return err
+		}
+		defer gr.Close()
+
+		_, err = io.Copy(calc, gr)
+		if err != nil {
+			return err
+		}
+	} else {
+		defer cw.Close()
+
+		rc, err := c.fetcher.Fetch(ctx, desc)
+		if err != nil {
+			return err
+		}
+		defer rc.Close()
+
+		// Decompress and checksum in one goroutine while copying the raw
+		// bytes into the content store in another.
+		eg, _ := errgroup.WithContext(ctx)
+		pr, pw := io.Pipe()
+
+		eg.Go(func() error {
+			gr, err := gzip.NewReader(pr)
+			if err != nil {
+				return err
+			}
+			defer gr.Close()
+
+			_, err = io.Copy(calc, gr)
+			pr.CloseWithError(err)
+			return err
+		})
+
+		eg.Go(func() error {
+			defer pw.Close()
+			opt := content.WithLabels(map[string]string{
+				"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
+			})
+			return content.Copy(ctx, cw, io.TeeReader(rc, pw), desc.Size, desc.Digest, opt)
+		})
+
+		if err := eg.Wait(); err != nil {
+			return err
+		}
+	}
+
+	if desc.Size == 0 {
+		info, err := c.contentStore.Info(ctx, desc.Digest)
+		if err != nil {
+			return errors.Wrap(err, "failed to get blob info")
+		}
+		desc.Size = info.Size
+	}
+
+	state := calc.State()
+
+	c.mu.Lock()
+	c.blobMap[desc.Digest] = state
+	c.layerBlobs[state.diffID] = desc
+	c.mu.Unlock()
+
+	return nil
+}
+// schema1ManifestHistory converts the pulled schema 1 manifest's v1
+// compatibility history into OCI history entries and the ordered list of
+// diff IDs for non-empty layers. Schema 1 lists layers top-to-bottom, so
+// entries are written back-to-front to produce base-to-top order.
+func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) {
+	if c.pulledManifest == nil {
+		return nil, nil, errors.New("missing schema 1 manifest for conversion")
+	}
+	m := *c.pulledManifest
+
+	if len(m.History) == 0 {
+		return nil, nil, errors.New("no history")
+	}
+
+	history := make([]ocispec.History, len(m.History))
+	diffIDs := []digest.Digest{}
+	for i := range m.History {
+		var h v1History
+		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil {
+			return nil, nil, errors.Wrap(err, "failed to unmarshal history")
+		}
+
+		blobSum := m.FSLayers[i].BlobSum
+
+		state := c.blobMap[blobSum]
+
+		history[len(history)-i-1] = ocispec.History{
+			Author:     h.Author,
+			Comment:    h.Comment,
+			Created:    &h.Created,
+			CreatedBy:  strings.Join(h.ContainerConfig.Cmd, " "),
+			EmptyLayer: state.empty,
+		}
+
+		if !state.empty {
+			diffIDs = append([]digest.Digest{state.diffID}, diffIDs...)
+
+		}
+	}
+
+	return history, diffIDs, nil
+}
+
+// fsLayer is a schema 1 manifest layer reference.
+type fsLayer struct {
+	BlobSum digest.Digest `json:"blobSum"`
+}
+
+// history is a schema 1 manifest history entry.
+type history struct {
+	V1Compatibility string `json:"v1Compatibility"`
+}
+
+// manifest is the subset of a schema 1 manifest needed for conversion.
+type manifest struct {
+	FSLayers []fsLayer `json:"fsLayers"`
+	History  []history `json:"history"`
+}
+
+// v1History is the subset of a v1Compatibility blob needed for conversion.
+type v1History struct {
+	Author          string    `json:"author,omitempty"`
+	Created         time.Time `json:"created"`
+	Comment         string    `json:"comment,omitempty"`
+	ThrowAway       *bool     `json:"throwaway,omitempty"`
+	Size            *int      `json:"Size,omitempty"` // used before ThrowAway field
+	ContainerConfig struct {
+		Cmd []string `json:"Cmd,omitempty"`
+	} `json:"container_config,omitempty"`
+}
+
+// isEmptyLayer returns whether the v1 compatibility history describes an
+// empty layer. A return value of true indicates the layer is empty,
+// however false does not indicate non-empty.
+func isEmptyLayer(compatHistory []byte) (bool, error) {
+	var h v1History
+	if err := json.Unmarshal(compatHistory, &h); err != nil {
+		return false, err
+	}
+
+	if h.ThrowAway != nil {
+		return *h.ThrowAway, nil
+	}
+	if h.Size != nil {
+		return *h.Size == 0, nil
+	}
+
+	// If no `Size` or `throwaway` field is given, then
+	// it cannot be determined whether the layer is empty
+	// from the history, return false
+	return false, nil
+}
+
+// signature is the JWS envelope attached to a signed schema 1 manifest.
+type signature struct {
+	Signatures []jsParsedSignature `json:"signatures"`
+}
+
+// jsParsedSignature carries the base64url-encoded protected header of a
+// single JWS signature.
+type jsParsedSignature struct {
+	Protected string `json:"protected"`
+}
+
+// protectedBlock describes how to reconstruct the unsigned payload: the
+// length of the signed prefix and the base64url-encoded tail.
+type protectedBlock struct {
+	Length int    `json:"formatLength"`
+	Tail   string `json:"formatTail"`
+}
+
+// joseBase64UrlDecode decodes the given string using the standard base64 url
+// decoder but first adds the appropriate number of trailing '=' characters in
+// accordance with the jose specification.
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
+func joseBase64UrlDecode(s string) ([]byte, error) {
+	switch len(s) % 4 {
+	case 0:
+	case 2:
+		s += "=="
+	case 3:
+		s += "="
+	default:
+		return nil, errors.New("illegal base64url string")
+	}
+	return base64.URLEncoding.DecodeString(s)
+}
+
+// stripSignature removes the JWS signature envelope from a signed schema 1
+// manifest, reconstructing the original payload from the protected header's
+// formatLength prefix and formatTail suffix.
+func stripSignature(b []byte) ([]byte, error) {
+	var sig signature
+	if err := json.Unmarshal(b, &sig); err != nil {
+		return nil, err
+	}
+	if len(sig.Signatures) == 0 {
+		return nil, errors.New("no signatures")
+	}
+	pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected)
+	if err != nil {
+		return nil, errors.Wrapf(err, "could not decode %s", sig.Signatures[0].Protected)
+	}
+
+	var protected protectedBlock
+	if err := json.Unmarshal(pb, &protected); err != nil {
+		return nil, err
+	}
+
+	if protected.Length > len(b) {
+		return nil, errors.New("invalid protected length block")
+	}
+
+	tail, err := joseBase64UrlDecode(protected.Tail)
+	if err != nil {
+		return nil, errors.Wrap(err, "invalid tail base 64 value")
+	}
+
+	return append(b[:protected.Length], tail...), nil
+}
+
+// blobStateCalculator is an io.Writer that simultaneously digests the bytes
+// written to it and tracks whether every byte seen so far is zero.
+type blobStateCalculator struct {
+	empty    bool
+	digester digest.Digester
+}
+
+// newBlobStateCalculator returns a calculator using the canonical digest
+// algorithm, initially considering the stream empty.
+func newBlobStateCalculator() *blobStateCalculator {
+	return &blobStateCalculator{
+		empty:    true,
+		digester: digest.Canonical.Digester(),
+	}
+}
+
+// Write feeds p into the digest and clears the empty flag on the first
+// non-zero byte observed.
+func (c *blobStateCalculator) Write(p []byte) (int, error) {
+	if c.empty {
+		for _, b := range p {
+			if b != 0x00 {
+				c.empty = false
+				break
+			}
+		}
+	}
+	return c.digester.Hash().Write(p)
+}
+
+// State returns the accumulated emptiness flag and diff ID digest.
+func (c *blobStateCalculator) State() blobState {
+	return blobState{
+		empty:  c.empty,
+		diffID: c.digester.Digest(),
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/scope.go b/vendor/github.com/containerd/containerd/remotes/docker/scope.go
new file mode 100644
index 0000000..9cf0997
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/scope.go
@@ -0,0 +1,60 @@
+package docker
+
+import (
+	"context"
+	"net/url"
+	"sort"
+	"strings"
+
+	"github.com/containerd/containerd/reference"
+)
+
+// repositoryScope returns a repository scope string such as "repository:foo/bar:pull"
+// for "host/foo/bar:baz".
+// When push is true, both pull and push are added to the scope.
+func repositoryScope(refspec reference.Spec, push bool) (string, error) {
+	u, err := url.Parse("dummy://" + refspec.Locator)
+	if err != nil {
+		return "", err
+	}
+	s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull"
+	if push {
+		s += ",push"
+	}
+	return s, nil
+}
+
+// tokenScopesKey is used for the key for context.WithValue().
+// value: []string (e.g. {"registry:foo/bar:pull"})
+type tokenScopesKey struct{}
+
+// contextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value.
+func contextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) {
+	s, err := repositoryScope(refspec, push)
+	if err != nil {
+		return nil, err
+	}
+	return context.WithValue(ctx, tokenScopesKey{}, []string{s}), nil
+}
+
+// getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and params["scope"].
+func getTokenScopes(ctx context.Context, params map[string]string) []string {
+	var scopes []string
+	if x := ctx.Value(tokenScopesKey{}); x != nil {
+		scopes = append(scopes, x.([]string)...)
+	}
+	if scope, ok := params["scope"]; ok {
+		for _, s := range scopes {
+			// Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/)
+			// So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal.
+			if s == scope {
+				// already appended
+				goto Sort
+			}
+		}
+		scopes = append(scopes, scope)
+	}
+Sort:
+	sort.Strings(scopes)
+	return scopes
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/docker/status.go b/vendor/github.com/containerd/containerd/remotes/docker/status.go
new file mode 100644
index 0000000..4b8dbbc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/docker/status.go
@@ -0,0 +1,51 @@
+package docker
+
+import (
+	"sync"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+)
+
+// Status of a content operation
+type Status struct {
+	content.Status
+
+	// UploadUUID is used by the Docker registry to reference blob uploads
+	UploadUUID string
+}
+
+// StatusTracker to track status of operations
+type StatusTracker interface {
+	GetStatus(string) (Status, error)
+	SetStatus(string, Status)
+}
+
+type memoryStatusTracker struct {
+	statuses map[string]Status
+	m        sync.Mutex
+}
+
+// NewInMemoryTracker returns a StatusTracker that tracks content status in-memory
+func NewInMemoryTracker() StatusTracker {
+	return &memoryStatusTracker{
+		statuses: map[string]Status{},
+	}
+}
+
+// GetStatus returns the tracked status for ref, or a wrapped
+// errdefs.ErrNotFound when no status has been set.
+func (t *memoryStatusTracker) GetStatus(ref string) (Status, error) {
+	t.m.Lock()
+	defer t.m.Unlock()
+	status, ok := t.statuses[ref]
+	if !ok {
+		return Status{}, errors.Wrapf(errdefs.ErrNotFound, "status for ref %v", ref)
+	}
+	return status, nil
+}
+
+// SetStatus records or replaces the status for ref.
+func (t *memoryStatusTracker) SetStatus(ref string, status Status) {
+	t.m.Lock()
+	t.statuses[ref] = status
+	t.m.Unlock()
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go
new file mode 100644
index 0000000..e6d2132
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/handlers.go
@@ -0,0 +1,218 @@
+package remotes
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"time"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+// MakeRefKey returns a unique reference for the descriptor. This reference can be
+// used to lookup ongoing processes related to the descriptor. This function
+// may look to the context to namespace the reference appropriately.
+func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string {
+	// TODO(stevvooe): Need better remote key selection here. Should be a
+	// product of the context, which may include information about the ongoing
+	// fetch process.
+	switch desc.MediaType {
+	case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+		return "manifest-" + desc.Digest.String()
+	case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+		return "index-" + desc.Digest.String()
+	case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip,
+		images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip,
+		ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
+		ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip:
+		return "layer-" + desc.Digest.String()
+	case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
+		return "config-" + desc.Digest.String()
+	default:
+		log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType)
+		return "unknown-" + desc.Digest.String()
+	}
+}
+
+// FetchHandler returns a handler that will fetch all content into the ingester
+// discovered in a call to Dispatch. Use with ChildrenHandler to do a full
+// recursive fetch.
+func FetchHandler(ingester content.Ingester, fetcher Fetcher, root ocispec.Descriptor) images.HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
+		ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{
+			"digest":    desc.Digest,
+			"mediatype": desc.MediaType,
+			"size":      desc.Size,
+		}))
+
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema1Manifest:
+			return nil, fmt.Errorf("%v not supported", desc.MediaType)
+		default:
+			err := fetch(ctx, ingester, fetcher, desc, desc.Digest == root.Digest)
+			return nil, err
+		}
+	}
+}
+
+func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor, root bool) error {
+	log.G(ctx).Debug("fetch")
+
+	var (
+		ref   = MakeRefKey(ctx, desc)
+		cw    content.Writer
+		err   error
+		retry = 16
+	)
+	for {
+		cw, err = ingester.Writer(ctx, ref, desc.Size, desc.Digest)
+		if err != nil {
+			if errdefs.IsAlreadyExists(err) {
+				return nil
+			} else if !errdefs.IsUnavailable(err) {
+				return err
+			}
+
+			// TODO: On first time locked is encountered, get status
+			// of writer and abort if not updated recently.
+
+			select {
+			case <-time.After(time.Millisecond * time.Duration(retry)):
+				if retry < 2048 {
+					retry = retry << 1
+				}
+				continue
+			case <-ctx.Done():
+				// Propagate lock error
+				return err
+			}
+		}
+		defer cw.Close()
+		break
+	}
+
+	rc, err := fetcher.Fetch(ctx, desc)
+	if err != nil {
+		return err
+	}
+	defer rc.Close()
+
+	r, opts := commitOpts(desc, rc, root)
+	return content.Copy(ctx, cw, r, desc.Size, desc.Digest, opts...)
+}
+
+// commitOpts gets the appropriate content options to alter
+// the content info on commit based on media type.
+func commitOpts(desc ocispec.Descriptor, r io.Reader, root bool) (io.Reader, []content.Opt) {
+	var childrenF func(r io.Reader) ([]ocispec.Descriptor, error)
+
+	switch desc.MediaType {
+	case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+		childrenF = func(r io.Reader) ([]ocispec.Descriptor, error) {
+			var (
+				manifest ocispec.Manifest
+				decoder  = json.NewDecoder(r)
+			)
+			if err := decoder.Decode(&manifest); err != nil {
+				return nil, err
+			}
+
+			return append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...), nil
+		}
+	case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+		childrenF = func(r io.Reader) ([]ocispec.Descriptor, error) {
+			var (
+				index   ocispec.Index
+				decoder = json.NewDecoder(r)
+			)
+			if err := decoder.Decode(&index); err != nil {
+				return nil, err
+			}
+
+			return index.Manifests, nil
+		}
+	default:
+		return r, nil
+	}
+
+	pr, pw := io.Pipe()
+
+	var children []ocispec.Descriptor
+	errC := make(chan error)
+
+	go func() {
+		defer close(errC)
+		ch, err := childrenF(pr)
+		if err != nil {
+			errC <- err
+		}
+		children = ch
+	}()
+
+	opt := func(info *content.Info) error {
+		err := <-errC
+		if err != nil {
+			return errors.Wrap(err, "unable to get commit labels")
+		}
+
+		if len(children) > 0 || root {
+			if info.Labels == nil {
+				info.Labels = map[string]string{}
+			}
+			if root {
+				info.Labels["containerd.io/gc.root"] = time.Now().UTC().Format(time.RFC3339)
+			}
+			for i, ch := range children {
+				info.Labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = ch.Digest.String()
+			}
+		}
+		return nil
+	}
+
+	return io.TeeReader(r, pw), []content.Opt{opt}
+}
+
+// PushHandler returns a handler that will push all content from the provider
+// using a writer from the pusher.
+func PushHandler(provider content.Provider, pusher Pusher) images.HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{
+			"digest":    desc.Digest,
+			"mediatype": desc.MediaType,
+			"size":      desc.Size,
+		}))
+
+		err := push(ctx, provider, pusher, desc)
+		return nil, err
+	}
+}
+
+func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error {
+	log.G(ctx).Debug("push")
+
+	cw, err := pusher.Push(ctx, desc)
+	if err != nil {
+		if !errdefs.IsAlreadyExists(err) {
+			return err
+		}
+
+		return nil
+	}
+	defer cw.Close()
+
+	ra, err := provider.ReaderAt(ctx, desc.Digest)
+	if err != nil {
+		return err
+	}
+	defer ra.Close()
+
+	rd := io.NewSectionReader(ra, 0, desc.Size)
+	return content.Copy(ctx, cw, rd, desc.Size, desc.Digest)
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/hints.go b/vendor/github.com/containerd/containerd/remotes/hints.go
new file mode 100644
index 0000000..1694477
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/hints.go
@@ -0,0 +1,31 @@
+package remotes
+
+import "strings"
+
+// HintExists returns true if a hint of the provided kind and values exists in
+// the set of provided hints.
+func HintExists(kind, value string, hints ...string) bool {
+	for _, hint := range hints {
+		if strings.HasPrefix(hint, kind) && strings.HasSuffix(hint, value) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// HintValues returns a slice of the values of the hints that match kind.
+func HintValues(kind string, hints ...string) []string {
+	var values []string
+	for _, hint := range hints {
+		if strings.HasPrefix(hint, kind) {
+			parts := strings.SplitN(hint, ":", 2)
+			if len(parts) < 2 {
+				continue
+			}
+			values = append(values, parts[1])
+		}
+	}
+
+	return values
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/resolver.go b/vendor/github.com/containerd/containerd/remotes/resolver.go
new file mode 100644
index 0000000..caf4c97
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/resolver.go
@@ -0,0 +1,64 @@
+package remotes
+
+import (
+	"context"
+	"io"
+
+	"github.com/containerd/containerd/content"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Resolver provides remotes based on a locator.
+type Resolver interface {
+	// Resolve attempts to resolve the reference into a name and descriptor.
+	//
+	// The argument `ref` should be a scheme-less URI representing the remote.
+	// Structurally, it has a host and path. The "host" can be used to directly
+	// reference a specific host or be matched against a specific handler.
+	//
+	// The returned name should be used to identify the referenced entity.
+	// Depending on the remote namespace, this may be immutable or mutable.
+	// While the name may differ from ref, it should itself be a valid ref.
+	//
+	// If the resolution fails, an error will be returned.
+	Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error)
+
+	// Fetcher returns a new fetcher for the provided reference.
+	// All content fetched from the returned fetcher will be
+	// from the namespace referred to by ref.
+	Fetcher(ctx context.Context, ref string) (Fetcher, error)
+
+	// Pusher returns a new pusher for the provided reference
+	Pusher(ctx context.Context, ref string) (Pusher, error)
+}
+
+// Fetcher fetches content
+type Fetcher interface {
+	// Fetch the resource identified by the descriptor.
+	Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
+}
+
+// Pusher pushes content
+type Pusher interface {
+	// Push returns a content writer for the given resource identified
+	// by the descriptor.
+	Push(ctx context.Context, d ocispec.Descriptor) (content.Writer, error)
+}
+
+// FetcherFunc allows package users to implement a Fetcher with just a
+// function.
+type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
+
+// Fetch content
+func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
+	return fn(ctx, desc)
+}
+
+// PusherFunc allows package users to implement a Pusher with just a
+// function.
+type PusherFunc func(ctx context.Context, desc ocispec.Descriptor, r io.Reader) error
+
+// Push content
+func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor, r io.Reader) error {
+	return fn(ctx, desc, r)
+}
diff --git a/vendor/github.com/containerd/containerd/rootfs/apply.go b/vendor/github.com/containerd/containerd/rootfs/apply.go
new file mode 100644
index 0000000..a198c99
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/rootfs/apply.go
@@ -0,0 +1,112 @@
+package rootfs
+
+import (
+	"crypto/rand"
+	"encoding/base64"
+	"fmt"
+	"time"
+
+	"github.com/containerd/containerd/diff"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/snapshot"
+	"github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/identity"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+// Layer represents the descriptors for a layer diff. These descriptions
+// include the descriptor for the uncompressed tar diff as well as a blob
+// used to transport that tar. The blob descriptor may or may not describe
+// a compressed object.
+type Layer struct {
+	Diff ocispec.Descriptor
+	Blob ocispec.Descriptor
+}
+
+// ApplyLayers applies all the layers using the given snapshotter and applier.
+// The returned result is a chain id digest representing all the applied layers.
+// Layers are applied in order they are given, making the first layer the
+// bottom-most layer in the layer chain.
+func ApplyLayers(ctx context.Context, layers []Layer, sn snapshot.Snapshotter, a diff.Differ) (digest.Digest, error) {
+	var chain []digest.Digest
+	for _, layer := range layers {
+		if _, err := ApplyLayer(ctx, layer, chain, sn, a); err != nil {
+			// TODO: possibly wait and retry if extraction of same chain id was in progress
+			return "", err
+		}
+
+		chain = append(chain, layer.Diff.Digest)
+	}
+	return identity.ChainID(chain), nil
+}
+
+// ApplyLayer applies a single layer on top of the given provided layer chain,
+// using the provided snapshotter and applier. If the layer was unpacked true
+// is returned, if the layer already exists false is returned.
+func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snapshot.Snapshotter, a diff.Differ, opts ...snapshot.Opt) (bool, error) {
+	var (
+		parent  = identity.ChainID(chain)
+		chainID = identity.ChainID(append(chain, layer.Diff.Digest))
+		diff    ocispec.Descriptor
+	)
+
+	_, err := sn.Stat(ctx, chainID.String())
+	if err == nil {
+		log.G(ctx).Debugf("Extraction not needed, layer snapshot exists")
+		return false, nil
+	} else if !errdefs.IsNotFound(err) {
+		return false, errors.Wrap(err, "failed to stat snapshot")
+	}
+
+	key := fmt.Sprintf("extract-%s %s", uniquePart(), chainID)
+
+	// Prepare snapshot from parent, label as root
+	mounts, err := sn.Prepare(ctx, key, parent.String(), opts...)
+	if err != nil {
+		// TODO: if this is a "snapshot exists" error, retry
+		return false, errors.Wrap(err, "failed to prepare extraction layer")
+	}
+	defer func() {
+		if err != nil {
+			log.G(ctx).WithError(err).WithField("key", key).Infof("Apply failure, attempting cleanup")
+			if rerr := sn.Remove(ctx, key); rerr != nil {
+				log.G(ctx).WithError(rerr).Warnf("Extraction snapshot %q removal failed", key)
+			}
+		}
+	}()
+
+	diff, err = a.Apply(ctx, layer.Blob, mounts)
+	if err != nil {
+		return false, errors.Wrapf(err, "failed to extract layer %s", layer.Diff.Digest)
+	}
+	if diff.Digest != layer.Diff.Digest {
+		err = errors.Errorf("wrong diff id calculated on extraction %q", diff.Digest)
+		return false, err
+	}
+
+	if err = sn.Commit(ctx, chainID.String(), key, opts...); err != nil {
+		if !errdefs.IsAlreadyExists(err) {
+			return false, errors.Wrapf(err, "failed to commit snapshot %s", parent)
+		}
+
+		// Destination already exists, cleanup key and return without error
+		err = nil
+		if err := sn.Remove(ctx, key); err != nil {
+			return false, errors.Wrapf(err, "failed to cleanup aborted apply %s", key)
+		}
+		return false, nil
+	}
+
+	return true, nil
+}
+
+func uniquePart() string {
+	t := time.Now()
+	var b [3]byte
+	// Ignore read failures, just decreases uniqueness
+	rand.Read(b[:])
+	return fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:]))
+}
diff --git a/vendor/github.com/containerd/containerd/rootfs/diff.go b/vendor/github.com/containerd/containerd/rootfs/diff.go
new file mode 100644
index 0000000..035eb30
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/rootfs/diff.go
@@ -0,0 +1,46 @@
+package rootfs
+
+import (
+	"fmt"
+
+	"github.com/containerd/containerd/diff"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/snapshot"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/net/context"
+)
+
+// Diff creates a layer diff for the given snapshot identifier from the parent
+// of the snapshot. A content ref is provided to track the progress of the
+// content creation and the provided snapshotter and mount differ are used
+// for calculating the diff. The descriptor for the layer diff is returned.
+func Diff(ctx context.Context, snapshotID string, sn snapshot.Snapshotter, d diff.Differ, opts ...diff.Opt) (ocispec.Descriptor, error) {
+	info, err := sn.Stat(ctx, snapshotID)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+
+	lowerKey := fmt.Sprintf("%s-parent-view", info.Parent)
+	lower, err := sn.View(ctx, lowerKey, info.Parent)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+	defer sn.Remove(ctx, lowerKey)
+
+	var upper []mount.Mount
+	if info.Kind == snapshot.KindActive {
+		upper, err = sn.Mounts(ctx, snapshotID)
+		if err != nil {
+			return ocispec.Descriptor{}, err
+		}
+	} else {
+		upperKey := fmt.Sprintf("%s-view", snapshotID)
+		upper, err = sn.View(ctx, upperKey, snapshotID)
+		if err != nil {
+			return ocispec.Descriptor{}, err
+		}
+		defer sn.Remove(ctx, upperKey)
+	}
+
+	return d.DiffMounts(ctx, lower, upper, opts...)
+}
diff --git a/vendor/github.com/containerd/containerd/rootfs/init.go b/vendor/github.com/containerd/containerd/rootfs/init.go
new file mode 100644
index 0000000..271e6ce
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/rootfs/init.go
@@ -0,0 +1,101 @@
+package rootfs
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/snapshot"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+var (
+	initializers = map[string]initializerFunc{}
+)
+
+type initializerFunc func(string) error
+
+// Mounter handles mount and unmount
+type Mounter interface {
+	Mount(target string, mounts ...mount.Mount) error
+	Unmount(target string) error
+}
+
+// InitRootFS initializes the snapshot for use as a rootfs
+func InitRootFS(ctx context.Context, name string, parent digest.Digest, readonly bool, snapshotter snapshot.Snapshotter, mounter Mounter) ([]mount.Mount, error) {
+	_, err := snapshotter.Stat(ctx, name)
+	if err == nil {
+		return nil, errors.Errorf("rootfs already exists")
+	}
+	// TODO: ensure not exist error once added to snapshot package
+
+	parentS := parent.String()
+
+	initName := defaultInitializer
+	initFn := initializers[initName]
+	if initFn != nil {
+		parentS, err = createInitLayer(ctx, parentS, initName, initFn, snapshotter, mounter)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if readonly {
+		return snapshotter.View(ctx, name, parentS)
+	}
+
+	return snapshotter.Prepare(ctx, name, parentS)
+}
+
+func createInitLayer(ctx context.Context, parent, initName string, initFn func(string) error, snapshotter snapshot.Snapshotter, mounter Mounter) (string, error) {
+	initS := fmt.Sprintf("%s %s", parent, initName)
+	if _, err := snapshotter.Stat(ctx, initS); err == nil {
+		return initS, nil
+	}
+	// TODO: ensure not exist error once added to snapshot package
+
+	// Create tempdir
+	td, err := ioutil.TempDir("", "create-init-")
+	if err != nil {
+		return "", err
+	}
+	defer os.RemoveAll(td)
+
+	mounts, err := snapshotter.Prepare(ctx, td, parent)
+	if err != nil {
+		return "", err
+	}
+	defer func() {
+		if err != nil {
+			// TODO: once implemented uncomment
+			//if rerr := snapshotter.Remove(ctx, td); rerr != nil {
+			//	log.G(ctx).Errorf("Failed to remove snapshot %s: %v", td, merr)
+			//}
+		}
+	}()
+
+	if err = mounter.Mount(td, mounts...); err != nil {
+		return "", err
+	}
+
+	if err = initFn(td); err != nil {
+		if merr := mounter.Unmount(td); merr != nil {
+			log.G(ctx).Errorf("Failed to unmount %s: %v", td, merr)
+		}
+		return "", err
+	}
+
+	if err = mounter.Unmount(td); err != nil {
+		return "", err
+	}
+
+	if err := snapshotter.Commit(ctx, initS, td); err != nil {
+		return "", err
+	}
+
+	return initS, nil
+}
diff --git a/vendor/github.com/containerd/containerd/rootfs/init_linux.go b/vendor/github.com/containerd/containerd/rootfs/init_linux.go
new file mode 100644
index 0000000..cabc457
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/rootfs/init_linux.go
@@ -0,0 +1,114 @@
+package rootfs
+
+import (
+	"os"
+	"path/filepath"
+	"syscall"
+)
+
+const (
+	defaultInitializer = "linux-init"
+)
+
+func init() {
+	initializers[defaultInitializer] = initFS
+}
+
+func createDirectory(name string, uid, gid int) initializerFunc {
+	return func(root string) error {
+		dname := filepath.Join(root, name)
+		st, err := os.Stat(dname)
+		if err != nil && !os.IsNotExist(err) {
+			return err
+		} else if err == nil {
+			if st.IsDir() {
+				stat := st.Sys().(*syscall.Stat_t)
+				if int(stat.Gid) == gid && int(stat.Uid) == uid {
+					return nil
+				}
+			} else {
+				if err := os.Remove(dname); err != nil {
+					return err
+				}
+				if err := os.Mkdir(dname, 0755); err != nil {
+					return err
+				}
+			}
+		} else {
+			if err := os.Mkdir(dname, 0755); err != nil {
+				return err
+			}
+		}
+
+		return os.Chown(dname, uid, gid)
+	}
+}
+
+func touchFile(name string, uid, gid int) initializerFunc {
+	return func(root string) error {
+		fname := filepath.Join(root, name)
+
+		st, err := os.Stat(fname)
+		if err != nil && !os.IsNotExist(err) {
+			return err
+		} else if err == nil {
+			stat := st.Sys().(*syscall.Stat_t)
+			if int(stat.Gid) == gid && int(stat.Uid) == uid {
+				return nil
+			}
+			return os.Chown(fname, uid, gid)
+		}
+
+		f, err := os.OpenFile(fname, os.O_CREATE, 0644)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+
+		return f.Chown(uid, gid)
+	}
+}
+
+func symlink(oldname, newname string) initializerFunc {
+	return func(root string) error {
+		linkName := filepath.Join(root, newname)
+		if _, err := os.Stat(linkName); err != nil && !os.IsNotExist(err) {
+			return err
+		} else if err == nil {
+			return nil
+		}
+		return os.Symlink(oldname, linkName)
+	}
+}
+
+func initFS(root string) error {
+	st, err := os.Stat(root)
+	if err != nil {
+		return err
+	}
+	stat := st.Sys().(*syscall.Stat_t)
+	uid := int(stat.Uid)
+	gid := int(stat.Gid)
+
+	initFuncs := []initializerFunc{
+		createDirectory("/dev", uid, gid),
+		createDirectory("/dev/pts", uid, gid),
+		createDirectory("/dev/shm", uid, gid),
+		touchFile("/dev/console", uid, gid),
+		createDirectory("/proc", uid, gid),
+		createDirectory("/sys", uid, gid),
+		createDirectory("/etc", uid, gid),
+		touchFile("/etc/resolv.conf", uid, gid),
+		touchFile("/etc/hosts", uid, gid),
+		touchFile("/etc/hostname", uid, gid),
+		symlink("/proc/mounts", "/etc/mtab"),
+	}
+
+	for _, fn := range initFuncs {
+		if err := fn(root); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/rootfs/init_other.go b/vendor/github.com/containerd/containerd/rootfs/init_other.go
new file mode 100644
index 0000000..b5e04e2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/rootfs/init_other.go
@@ -0,0 +1,7 @@
+// +build !linux
+
+package rootfs
+
+const (
+	defaultInitializer = ""
+)
diff --git a/vendor/github.com/containerd/containerd/runtime/container.go b/vendor/github.com/containerd/containerd/runtime/container.go
deleted file mode 100644
index 9e1d24f..0000000
--- a/vendor/github.com/containerd/containerd/runtime/container.go
+++ /dev/null
@@ -1,748 +0,0 @@
-package runtime
-
-import (
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/containerd/containerd/specs"
-	ocs "github.com/opencontainers/runtime-spec/specs-go"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/net/context"
-	"golang.org/x/sys/unix"
-)
-
-// Container defines the operations allowed on a container
-type Container interface {
-	// ID returns the container ID
-	ID() string
-	// Path returns the path to the bundle
-	Path() string
-	// Start starts the init process of the container
-	Start(ctx context.Context, checkpointPath string, s Stdio) (Process, error)
-	// Exec starts another process in an existing container
-	Exec(context.Context, string, specs.ProcessSpec, Stdio) (Process, error)
-	// Delete removes the container's state and any resources
-	Delete() error
-	// Processes returns all the containers processes that have been added
-	Processes() ([]Process, error)
-	// State returns the containers runtime state
-	State() State
-	// Resume resumes a paused container
-	Resume() error
-	// Pause pauses a running container
-	Pause() error
-	// RemoveProcess removes the specified process from the container
-	RemoveProcess(string) error
-	// Checkpoints returns all the checkpoints for a container
-	Checkpoints(checkpointDir string) ([]Checkpoint, error)
-	// Checkpoint creates a new checkpoint
-	Checkpoint(checkpoint Checkpoint, checkpointDir string) error
-	// DeleteCheckpoint deletes the checkpoint for the provided name
-	DeleteCheckpoint(name string, checkpointDir string) error
-	// Labels are user provided labels for the container
-	Labels() []string
-	// Pids returns all pids inside the container
-	Pids() ([]int, error)
-	// Stats returns realtime container stats and resource information
-	Stats() (*Stat, error)
-	// Name or path of the OCI compliant runtime used to execute the container
-	Runtime() string
-	// OOM signals the channel if the container received an OOM notification
-	OOM() (OOM, error)
-	// UpdateResource updates the containers resources to new values
-	UpdateResources(*Resource) error
-
-	// Status return the current status of the container.
-	Status() (State, error)
-}
-
-// OOM wraps a container OOM.
-type OOM interface {
-	io.Closer
-	FD() int
-	ContainerID() string
-	Flush()
-	Removed() bool
-}
-
-// Stdio holds the path to the 3 pipes used for the standard ios.
-type Stdio struct {
-	Stdin  string
-	Stdout string
-	Stderr string
-}
-
-// NewStdio wraps the given standard io path into an Stdio struct.
-// If a given parameter is the empty string, it is replaced by "/dev/null"
-func NewStdio(stdin, stdout, stderr string) Stdio {
-	for _, s := range []*string{
-		&stdin, &stdout, &stderr,
-	} {
-		if *s == "" {
-			*s = "/dev/null"
-		}
-	}
-	return Stdio{
-		Stdin:  stdin,
-		Stdout: stdout,
-		Stderr: stderr,
-	}
-}
-
-// ContainerOpts keeps the options passed at container creation
-type ContainerOpts struct {
-	Root        string
-	ID          string
-	Bundle      string
-	Runtime     string
-	RuntimeArgs []string
-	Shim        string
-	Labels      []string
-	NoPivotRoot bool
-	Timeout     time.Duration
-}
-
-// New returns a new container
-func New(opts ContainerOpts) (Container, error) {
-	c := &container{
-		root:        opts.Root,
-		id:          opts.ID,
-		bundle:      opts.Bundle,
-		labels:      opts.Labels,
-		processes:   make(map[string]*process),
-		runtime:     opts.Runtime,
-		runtimeArgs: opts.RuntimeArgs,
-		shim:        opts.Shim,
-		noPivotRoot: opts.NoPivotRoot,
-		timeout:     opts.Timeout,
-	}
-	if err := os.Mkdir(filepath.Join(c.root, c.id), 0755); err != nil {
-		return nil, err
-	}
-	f, err := os.Create(filepath.Join(c.root, c.id, StateFile))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	if err := json.NewEncoder(f).Encode(state{
-		Bundle:      c.bundle,
-		Labels:      c.labels,
-		Runtime:     c.runtime,
-		RuntimeArgs: c.runtimeArgs,
-		Shim:        c.shim,
-		NoPivotRoot: opts.NoPivotRoot,
-	}); err != nil {
-		return nil, err
-	}
-	return c, nil
-}
-
-// Load return a new container from the matchin state file on disk.
-func Load(root, id, shimName string, timeout time.Duration) (Container, error) {
-	var s state
-	f, err := os.Open(filepath.Join(root, id, StateFile))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	if err := json.NewDecoder(f).Decode(&s); err != nil {
-		return nil, err
-	}
-	c := &container{
-		root:        root,
-		id:          id,
-		bundle:      s.Bundle,
-		labels:      s.Labels,
-		runtime:     s.Runtime,
-		runtimeArgs: s.RuntimeArgs,
-		shim:        s.Shim,
-		noPivotRoot: s.NoPivotRoot,
-		processes:   make(map[string]*process),
-		timeout:     timeout,
-	}
-
-	if c.shim == "" {
-		c.shim = shimName
-	}
-
-	dirs, err := ioutil.ReadDir(filepath.Join(root, id))
-	if err != nil {
-		return nil, err
-	}
-	for _, d := range dirs {
-		if !d.IsDir() {
-			continue
-		}
-		pid := d.Name()
-		s, err := readProcessState(filepath.Join(root, id, pid))
-		if err != nil {
-			return nil, err
-		}
-		p, err := loadProcess(filepath.Join(root, id, pid), pid, c, s)
-		if err != nil {
-			logrus.WithField("id", id).WithField("pid", pid).Debugf("containerd: error loading process %s", err)
-			continue
-		}
-		c.processes[pid] = p
-	}
-
-	_, err = os.Stat(c.bundle)
-	if err != nil && !os.IsExist(err) {
-		for key, p := range c.processes {
-			if key == InitProcessID {
-				p.Delete()
-				break
-			}
-		}
-		return nil, fmt.Errorf("bundle dir %s don't exist", c.bundle)
-	}
-	return c, nil
-}
-
-func readProcessState(dir string) (*ProcessState, error) {
-	f, err := os.Open(filepath.Join(dir, "process.json"))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	var s ProcessState
-	if err := json.NewDecoder(f).Decode(&s); err != nil {
-		return nil, err
-	}
-	return &s, nil
-}
-
-type container struct {
-	// path to store runtime state information
-	root        string
-	id          string
-	bundle      string
-	runtime     string
-	runtimeArgs []string
-	shim        string
-	processes   map[string]*process
-	labels      []string
-	oomFds      []int
-	noPivotRoot bool
-	timeout     time.Duration
-}
-
-func (c *container) ID() string {
-	return c.id
-}
-
-func (c *container) Path() string {
-	return c.bundle
-}
-
-func (c *container) Labels() []string {
-	return c.labels
-}
-
-func (c *container) readSpec() (*specs.Spec, error) {
-	var spec specs.Spec
-	f, err := os.Open(filepath.Join(c.bundle, "config.json"))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	if err := json.NewDecoder(f).Decode(&spec); err != nil {
-		return nil, err
-	}
-	return &spec, nil
-}
-
-func (c *container) Delete() error {
-	var err error
-	args := append(c.runtimeArgs, "delete", c.id)
-	if b, derr := exec.Command(c.runtime, args...).CombinedOutput(); derr != nil && !strings.Contains(string(b), "does not exist") {
-		err = fmt.Errorf("%s: %q", derr, string(b))
-	}
-	if rerr := os.RemoveAll(filepath.Join(c.root, c.id)); rerr != nil {
-		if err != nil {
-			err = fmt.Errorf("%s; failed to remove %s: %s", err, filepath.Join(c.root, c.id), rerr)
-		} else {
-			err = rerr
-		}
-	}
-	return err
-}
-
-func (c *container) Processes() ([]Process, error) {
-	out := []Process{}
-	for _, p := range c.processes {
-		out = append(out, p)
-	}
-	return out, nil
-}
-
-func (c *container) RemoveProcess(pid string) error {
-	delete(c.processes, pid)
-	return os.RemoveAll(filepath.Join(c.root, c.id, pid))
-}
-
-func (c *container) State() State {
-	proc := c.processes[InitProcessID]
-	if proc == nil {
-		return Stopped
-	}
-	return proc.State()
-}
-
-func (c *container) Runtime() string {
-	return c.runtime
-}
-
-func (c *container) Pause() error {
-	args := c.runtimeArgs
-	args = append(args, "pause", c.id)
-	b, err := exec.Command(c.runtime, args...).CombinedOutput()
-	if err != nil {
-		return fmt.Errorf("%s: %q", err.Error(), string(b))
-	}
-	return nil
-}
-
-func (c *container) Resume() error {
-	args := c.runtimeArgs
-	args = append(args, "resume", c.id)
-	b, err := exec.Command(c.runtime, args...).CombinedOutput()
-	if err != nil {
-		return fmt.Errorf("%s: %q", err.Error(), string(b))
-	}
-	return nil
-}
-
-func (c *container) Checkpoints(checkpointDir string) ([]Checkpoint, error) {
-	if checkpointDir == "" {
-		checkpointDir = filepath.Join(c.bundle, "checkpoints")
-	}
-
-	dirs, err := ioutil.ReadDir(checkpointDir)
-	if err != nil {
-		return nil, err
-	}
-	var out []Checkpoint
-	for _, d := range dirs {
-		if !d.IsDir() {
-			continue
-		}
-		path := filepath.Join(checkpointDir, d.Name(), "config.json")
-		data, err := ioutil.ReadFile(path)
-		if err != nil {
-			return nil, err
-		}
-		var cpt Checkpoint
-		if err := json.Unmarshal(data, &cpt); err != nil {
-			return nil, err
-		}
-		out = append(out, cpt)
-	}
-	return out, nil
-}
-
-func (c *container) Checkpoint(cpt Checkpoint, checkpointDir string) error {
-	if checkpointDir == "" {
-		checkpointDir = filepath.Join(c.bundle, "checkpoints")
-	}
-
-	if err := os.MkdirAll(checkpointDir, 0755); err != nil {
-		return err
-	}
-
-	path := filepath.Join(checkpointDir, cpt.Name)
-	if err := os.Mkdir(path, 0755); err != nil {
-		return err
-	}
-	f, err := os.Create(filepath.Join(path, "config.json"))
-	if err != nil {
-		return err
-	}
-	cpt.Created = time.Now()
-	err = json.NewEncoder(f).Encode(cpt)
-	f.Close()
-	if err != nil {
-		return err
-	}
-	args := []string{
-		"checkpoint",
-		"--image-path", path,
-		"--work-path", filepath.Join(path, "criu.work"),
-	}
-	add := func(flags ...string) {
-		args = append(args, flags...)
-	}
-	add(c.runtimeArgs...)
-	if !cpt.Exit {
-		add("--leave-running")
-	}
-	if cpt.Shell {
-		add("--shell-job")
-	}
-	if cpt.TCP {
-		add("--tcp-established")
-	}
-	if cpt.UnixSockets {
-		add("--ext-unix-sk")
-	}
-	for _, ns := range cpt.EmptyNS {
-		add("--empty-ns", ns)
-	}
-	add(c.id)
-	out, err := exec.Command(c.runtime, args...).CombinedOutput()
-	if err != nil {
-		return fmt.Errorf("%s: %q", err.Error(), string(out))
-	}
-	return err
-}
-
-func (c *container) DeleteCheckpoint(name string, checkpointDir string) error {
-	if checkpointDir == "" {
-		checkpointDir = filepath.Join(c.bundle, "checkpoints")
-	}
-	return os.RemoveAll(filepath.Join(checkpointDir, name))
-}
-
-func (c *container) Start(ctx context.Context, checkpointPath string, s Stdio) (Process, error) {
-	processRoot := filepath.Join(c.root, c.id, InitProcessID)
-	if err := os.Mkdir(processRoot, 0755); err != nil {
-		return nil, err
-	}
-	cmd := exec.Command(c.shim,
-		c.id, c.bundle, c.runtime,
-	)
-	cmd.Dir = processRoot
-	cmd.SysProcAttr = &syscall.SysProcAttr{
-		Setpgid: true,
-	}
-	spec, err := c.readSpec()
-	if err != nil {
-		return nil, err
-	}
-	config := &processConfig{
-		checkpoint:  checkpointPath,
-		root:        processRoot,
-		id:          InitProcessID,
-		c:           c,
-		stdio:       s,
-		spec:        spec,
-		processSpec: specs.ProcessSpec(*spec.Process),
-	}
-	p, err := newProcess(config)
-	if err != nil {
-		return nil, err
-	}
-	if err := c.createCmd(ctx, InitProcessID, cmd, p); err != nil {
-		return nil, err
-	}
-	return p, nil
-}
-
-func (c *container) Exec(ctx context.Context, pid string, pspec specs.ProcessSpec, s Stdio) (pp Process, err error) {
-	processRoot := filepath.Join(c.root, c.id, pid)
-	if err := os.Mkdir(processRoot, 0755); err != nil {
-		return nil, err
-	}
-	defer func() {
-		if err != nil {
-			c.RemoveProcess(pid)
-		}
-	}()
-	cmd := exec.Command(c.shim,
-		c.id, c.bundle, c.runtime,
-	)
-	cmd.Dir = processRoot
-	cmd.SysProcAttr = &syscall.SysProcAttr{
-		Setpgid: true,
-	}
-	spec, err := c.readSpec()
-	if err != nil {
-		return nil, err
-	}
-	config := &processConfig{
-		exec:        true,
-		id:          pid,
-		root:        processRoot,
-		c:           c,
-		processSpec: pspec,
-		spec:        spec,
-		stdio:       s,
-	}
-	p, err := newProcess(config)
-	if err != nil {
-		return nil, err
-	}
-	if err := c.createCmd(ctx, pid, cmd, p); err != nil {
-		return nil, err
-	}
-	return p, nil
-}
-
-func (c *container) createCmd(ctx context.Context, pid string, cmd *exec.Cmd, p *process) error {
-	p.cmd = cmd
-	if err := cmd.Start(); err != nil {
-		close(p.cmdDoneCh)
-		if exErr, ok := err.(*exec.Error); ok {
-			if exErr.Err == exec.ErrNotFound || exErr.Err == os.ErrNotExist {
-				return fmt.Errorf("%s not installed on system", c.shim)
-			}
-		}
-		return err
-	}
-	// We need the pid file to have been written to run
-	defer func() {
-		go func() {
-			err := p.cmd.Wait()
-			if err == nil {
-				p.cmdSuccess = true
-			}
-
-			if same, err := p.isSameProcess(); same && p.pid > 0 {
-				// The process changed its PR_SET_PDEATHSIG, so force
-				// kill it
-				logrus.Infof("containerd: %s:%s (pid %v) has become an orphan, killing it", p.container.id, p.id, p.pid)
-				err = unix.Kill(p.pid, syscall.SIGKILL)
-				if err != nil && err != syscall.ESRCH {
-					logrus.Errorf("containerd: unable to SIGKILL %s:%s (pid %v): %v", p.container.id, p.id, p.pid, err)
-				} else {
-					for {
-						err = unix.Kill(p.pid, 0)
-						if err != nil {
-							break
-						}
-						time.Sleep(5 * time.Millisecond)
-					}
-				}
-			}
-			close(p.cmdDoneCh)
-		}()
-	}()
-
-	ch := make(chan error)
-	go func() {
-		if err := c.waitForCreate(p, cmd); err != nil {
-			ch <- err
-			return
-		}
-		c.processes[pid] = p
-		ch <- nil
-	}()
-	select {
-	case <-ctx.Done():
-		cmd.Process.Kill()
-		cmd.Wait()
-		<-ch
-		return ctx.Err()
-	case err := <-ch:
-		return err
-	}
-}
-
-func hostIDFromMap(id uint32, mp []ocs.LinuxIDMapping) int {
-	for _, m := range mp {
-		if (id >= m.ContainerID) && (id <= (m.ContainerID + m.Size - 1)) {
-			return int(m.HostID + (id - m.ContainerID))
-		}
-	}
-	return 0
-}
-
-func (c *container) Stats() (*Stat, error) {
-	now := time.Now()
-	args := c.runtimeArgs
-	args = append(args, "events", "--stats", c.id)
-	out, err := exec.Command(c.runtime, args...).CombinedOutput()
-	if err != nil {
-		return nil, fmt.Errorf("%s: %q", err.Error(), out)
-	}
-	s := struct {
-		Data *Stat `json:"data"`
-	}{}
-	if err := json.Unmarshal(out, &s); err != nil {
-		return nil, err
-	}
-	s.Data.Timestamp = now
-	return s.Data, nil
-}
-
-// Status implements the runtime Container interface.
-func (c *container) Status() (State, error) {
-	args := c.runtimeArgs
-	args = append(args, "state", c.id)
-
-	out, err := exec.Command(c.runtime, args...).CombinedOutput()
-	if err != nil {
-		return "", fmt.Errorf("%s: %q", err.Error(), out)
-	}
-
-	// We only require the runtime json output to have a top level Status field.
-	var s struct {
-		Status State `json:"status"`
-	}
-	if err := json.Unmarshal(out, &s); err != nil {
-		return "", err
-	}
-	return s.Status, nil
-}
-
-func (c *container) writeEventFD(root string, cfd, efd int) error {
-	f, err := os.OpenFile(filepath.Join(root, "cgroup.event_control"), os.O_WRONLY, 0)
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-	_, err = f.WriteString(fmt.Sprintf("%d %d", efd, cfd))
-	return err
-}
-
-type waitArgs struct {
-	pid int
-	err error
-}
-
-func (c *container) waitForCreate(p *process, cmd *exec.Cmd) error {
-	wc := make(chan error, 1)
-	go func() {
-		for {
-			if _, err := p.getPidFromFile(); err != nil {
-				if os.IsNotExist(err) || err == errInvalidPidInt || err == errContainerNotFound {
-					alive, err := isAlive(cmd)
-					if err != nil {
-						wc <- err
-						return
-					}
-					if !alive {
-						// runc could have failed to run the container so lets get the error
-						// out of the logs or the shim could have encountered an error
-						messages, err := readLogMessages(filepath.Join(p.root, "shim-log.json"))
-						if err != nil {
-							wc <- err
-							return
-						}
-						for _, m := range messages {
-							if m.Level == "error" {
-								wc <- fmt.Errorf("shim error: %v", m.Msg)
-								return
-							}
-						}
-						// no errors reported back from shim, check for runc/runtime errors
-						messages, err = readLogMessages(filepath.Join(p.root, "log.json"))
-						if err != nil {
-							if os.IsNotExist(err) {
-								err = ErrContainerNotStarted
-							}
-							wc <- err
-							return
-						}
-						for _, m := range messages {
-							if m.Level == "error" {
-								wc <- fmt.Errorf("oci runtime error: %v", m.Msg)
-								return
-							}
-						}
-						wc <- ErrContainerNotStarted
-						return
-					}
-					time.Sleep(15 * time.Millisecond)
-					continue
-				}
-				wc <- err
-				return
-			}
-			// the pid file was read successfully
-			wc <- nil
-			return
-		}
-	}()
-	select {
-	case err := <-wc:
-		if err != nil {
-			return err
-		}
-		err = p.saveStartTime()
-		if err != nil && !os.IsNotExist(err) {
-			logrus.Warnf("containerd: unable to save %s:%s starttime: %v", p.container.id, p.id, err)
-		}
-		return nil
-	case <-time.After(c.timeout):
-		cmd.Process.Kill()
-		cmd.Wait()
-		return ErrContainerStartTimeout
-	}
-}
-
-// isAlive checks if the shim that launched the container is still alive
-func isAlive(cmd *exec.Cmd) (bool, error) {
-	if _, err := syscall.Wait4(cmd.Process.Pid, nil, syscall.WNOHANG, nil); err == nil {
-		return true, nil
-	}
-	if err := syscall.Kill(cmd.Process.Pid, 0); err != nil {
-		if err == syscall.ESRCH {
-			return false, nil
-		}
-		return false, err
-	}
-	return true, nil
-}
-
-type oom struct {
-	id      string
-	root    string
-	eventfd int
-}
-
-func (o *oom) ContainerID() string {
-	return o.id
-}
-
-func (o *oom) FD() int {
-	return o.eventfd
-}
-
-func (o *oom) Flush() {
-	buf := make([]byte, 8)
-	syscall.Read(o.eventfd, buf)
-}
-
-func (o *oom) Removed() bool {
-	_, err := os.Lstat(filepath.Join(o.root, "cgroup.event_control"))
-	return os.IsNotExist(err)
-}
-
-func (o *oom) Close() error {
-	return syscall.Close(o.eventfd)
-}
-
-type message struct {
-	Level string `json:"level"`
-	Msg   string `json:"msg"`
-}
-
-func readLogMessages(path string) ([]message, error) {
-	var out []message
-	f, err := os.Open(path)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	dec := json.NewDecoder(f)
-	for {
-		var m message
-		if err := dec.Decode(&m); err != nil {
-			if err == io.EOF {
-				break
-			}
-			return nil, err
-		}
-		out = append(out, m)
-	}
-	return out, nil
-}
diff --git a/vendor/github.com/containerd/containerd/runtime/container_linux.go b/vendor/github.com/containerd/containerd/runtime/container_linux.go
deleted file mode 100644
index 265a38c..0000000
--- a/vendor/github.com/containerd/containerd/runtime/container_linux.go
+++ /dev/null
@@ -1,192 +0,0 @@
-package runtime
-
-import (
-	"bufio"
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"strings"
-	"syscall"
-
-	"github.com/containerd/containerd/specs"
-	ocs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func findCgroupMountpointAndRoot(pid int, subsystem string) (string, string, error) {
-	f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
-	if err != nil {
-		return "", "", err
-	}
-	defer f.Close()
-
-	scanner := bufio.NewScanner(f)
-	for scanner.Scan() {
-		txt := scanner.Text()
-		fields := strings.Split(txt, " ")
-		for _, opt := range strings.Split(fields[len(fields)-1], ",") {
-			if opt == subsystem {
-				return fields[4], fields[3], nil
-			}
-		}
-	}
-	if err := scanner.Err(); err != nil {
-		return "", "", err
-	}
-
-	return "", "", fmt.Errorf("cgroup path for %s not found", subsystem)
-}
-
-func parseCgroupFile(path string) (map[string]string, error) {
-	f, err := os.Open(path)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	s := bufio.NewScanner(f)
-	cgroups := make(map[string]string)
-
-	for s.Scan() {
-		if err := s.Err(); err != nil {
-			return nil, err
-		}
-
-		text := s.Text()
-		parts := strings.Split(text, ":")
-
-		for _, subs := range strings.Split(parts[1], ",") {
-			cgroups[subs] = parts[2]
-		}
-	}
-	return cgroups, nil
-}
-
-func (c *container) OOM() (OOM, error) {
-	p := c.processes[InitProcessID]
-	if p == nil {
-		return nil, fmt.Errorf("no init process found")
-	}
-
-	mountpoint, hostRoot, err := findCgroupMountpointAndRoot(os.Getpid(), "memory")
-	if err != nil {
-		return nil, err
-	}
-
-	cgroups, err := parseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", p.pid))
-	if err != nil {
-		return nil, err
-	}
-
-	root, ok := cgroups["memory"]
-	if !ok {
-		return nil, fmt.Errorf("no memory cgroup for container %s", c.ID())
-	}
-
-	// Take care of the case were we're running inside a container
-	// ourself
-	root = strings.TrimPrefix(root, hostRoot)
-
-	return c.getMemoryEventFD(filepath.Join(mountpoint, root))
-}
-
-func (c *container) Pids() ([]int, error) {
-	var pids []int
-	args := c.runtimeArgs
-	args = append(args, "ps", "--format=json", c.id)
-	out, err := exec.Command(c.runtime, args...).CombinedOutput()
-	if err != nil {
-		return nil, fmt.Errorf("%s: %q", err.Error(), out)
-	}
-	if err := json.Unmarshal(out, &pids); err != nil {
-		return nil, err
-	}
-	return pids, nil
-}
-
-func u64Ptr(i uint64) *uint64 { return &i }
-func i64Ptr(i int64) *int64   { return &i }
-
-func (c *container) UpdateResources(r *Resource) error {
-	sr := ocs.LinuxResources{
-		Memory: &ocs.LinuxMemory{
-			Limit:       i64Ptr(r.Memory),
-			Reservation: i64Ptr(r.MemoryReservation),
-			Swap:        i64Ptr(r.MemorySwap),
-			Kernel:      i64Ptr(r.KernelMemory),
-			KernelTCP:   i64Ptr(r.KernelTCPMemory),
-		},
-		CPU: &ocs.LinuxCPU{
-			Shares:          u64Ptr(uint64(r.CPUShares)),
-			Quota:           i64Ptr(int64(r.CPUQuota)),
-			Period:          u64Ptr(uint64(r.CPUPeriod)),
-			Cpus:            r.CpusetCpus,
-			Mems:            r.CpusetMems,
-			RealtimePeriod:  u64Ptr(uint64(r.CPURealtimePeriod)),
-			RealtimeRuntime: i64Ptr(int64(r.CPURealtimdRuntime)),
-		},
-		BlockIO: &ocs.LinuxBlockIO{
-			Weight: &r.BlkioWeight,
-		},
-		Pids: &ocs.LinuxPids{
-			Limit: r.PidsLimit,
-		},
-	}
-
-	srStr := bytes.NewBuffer(nil)
-	if err := json.NewEncoder(srStr).Encode(&sr); err != nil {
-		return err
-	}
-
-	args := c.runtimeArgs
-	args = append(args, "update", "-r", "-", c.id)
-	cmd := exec.Command(c.runtime, args...)
-	cmd.Stdin = srStr
-	b, err := cmd.CombinedOutput()
-	if err != nil {
-		return fmt.Errorf(string(b))
-	}
-	return nil
-}
-
-func getRootIDs(s *specs.Spec) (int, int, error) {
-	if s == nil {
-		return 0, 0, nil
-	}
-	var hasUserns bool
-	for _, ns := range s.Linux.Namespaces {
-		if ns.Type == ocs.UserNamespace {
-			hasUserns = true
-			break
-		}
-	}
-	if !hasUserns {
-		return 0, 0, nil
-	}
-	uid := hostIDFromMap(0, s.Linux.UIDMappings)
-	gid := hostIDFromMap(0, s.Linux.GIDMappings)
-	return uid, gid, nil
-}
-
-func (c *container) getMemoryEventFD(root string) (*oom, error) {
-	f, err := os.Open(filepath.Join(root, "memory.oom_control"))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-	fd, _, serr := syscall.RawSyscall(syscall.SYS_EVENTFD2, 0, syscall.FD_CLOEXEC, 0)
-	if serr != 0 {
-		return nil, serr
-	}
-	if err := c.writeEventFD(root, int(f.Fd()), int(fd)); err != nil {
-		syscall.Close(int(fd))
-		return nil, err
-	}
-	return &oom{
-		root:    root,
-		id:      c.id,
-		eventfd: int(fd),
-	}, nil
-}
diff --git a/vendor/github.com/containerd/containerd/runtime/container_solaris.go b/vendor/github.com/containerd/containerd/runtime/container_solaris.go
deleted file mode 100644
index 7d9c538..0000000
--- a/vendor/github.com/containerd/containerd/runtime/container_solaris.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package runtime
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"os/exec"
-	"strings"
-
-	"github.com/containerd/containerd/specs"
-	ocs "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-func getRootIDs(s *specs.Spec) (int, int, error) {
-	return 0, 0, nil
-}
-
-func (c *container) OOM() (OOM, error) {
-	return nil, nil
-}
-
-func (c *container) Pids() ([]int, error) {
-	var pids []int
-
-	// TODO: This could be racy. Needs more investigation.
-	//we get this information from runz state
-	cmd := exec.Command(c.runtime, "state", c.id)
-	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
-	cmd.Stdout, cmd.Stderr = outBuf, errBuf
-
-	if err := cmd.Run(); err != nil {
-		if strings.Contains(errBuf.String(), "Container not found") {
-			return nil, errContainerNotFound
-		}
-		return nil, fmt.Errorf("Error is: %+v\n", err)
-	}
-	response := ocs.State{}
-	decoder := json.NewDecoder(outBuf)
-	if err := decoder.Decode(&response); err != nil {
-		return nil, fmt.Errorf("unable to decode json response: %+v", err)
-	}
-	pids = append(pids, response.Pid)
-	return pids, nil
-}
-
-func (c *container) UpdateResources(r *Resource) error {
-	return nil
-}
diff --git a/vendor/github.com/containerd/containerd/runtime/events.go b/vendor/github.com/containerd/containerd/runtime/events.go
new file mode 100644
index 0000000..36b701d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/events.go
@@ -0,0 +1,26 @@
+package runtime
+
+const (
+	// TaskCreateEventTopic for task create
+	TaskCreateEventTopic = "/tasks/create"
+	// TaskStartEventTopic for task start
+	TaskStartEventTopic = "/tasks/start"
+	// TaskOOMEventTopic for task oom
+	TaskOOMEventTopic = "/tasks/oom"
+	// TaskExitEventTopic for task exit
+	TaskExitEventTopic = "/tasks/exit"
+	// TaskDeleteEventTopic for task delete
+	TaskDeleteEventTopic = "/tasks/delete"
+	// TaskExecAddedEventTopic for task exec create
+	TaskExecAddedEventTopic = "/tasks/exec-added"
+	// TaskExecStartedEventTopic for task exec start
+	TaskExecStartedEventTopic = "/tasks/exec-started"
+	// TaskPausedEventTopic for task pause
+	TaskPausedEventTopic = "/tasks/paused"
+	// TaskResumedEventTopic for task resume
+	TaskResumedEventTopic = "/tasks/resumed"
+	// TaskCheckpointedEventTopic for task checkpoint
+	TaskCheckpointedEventTopic = "/tasks/checkpointed"
+	// TaskUnknownTopic for unknown task events
+	TaskUnknownTopic = "/tasks/?"
+)
diff --git a/vendor/github.com/containerd/containerd/runtime/monitor.go b/vendor/github.com/containerd/containerd/runtime/monitor.go
new file mode 100644
index 0000000..f5f8f1c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/monitor.go
@@ -0,0 +1,54 @@
+package runtime
+
+// TaskMonitor provides an interface for monitoring of containers within containerd
+type TaskMonitor interface {
+	// Monitor adds the provided container to the monitor
+	Monitor(Task) error
+	// Stop stops and removes the provided container from the monitor
+	Stop(Task) error
+}
+
+// NewMultiTaskMonitor returns a new TaskMonitor broadcasting to the provided monitors
+func NewMultiTaskMonitor(monitors ...TaskMonitor) TaskMonitor {
+	return &multiTaskMonitor{
+		monitors: monitors,
+	}
+}
+
+// NewNoopMonitor is a task monitor that does nothing
+func NewNoopMonitor() TaskMonitor {
+	return &noopTaskMonitor{}
+}
+
+type noopTaskMonitor struct {
+}
+
+func (mm *noopTaskMonitor) Monitor(c Task) error {
+	return nil
+}
+
+func (mm *noopTaskMonitor) Stop(c Task) error {
+	return nil
+}
+
+type multiTaskMonitor struct {
+	monitors []TaskMonitor
+}
+
+func (mm *multiTaskMonitor) Monitor(c Task) error {
+	for _, m := range mm.monitors {
+		if err := m.Monitor(c); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (mm *multiTaskMonitor) Stop(c Task) error {
+	for _, m := range mm.monitors {
+		if err := m.Stop(c); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/runtime/process.go b/vendor/github.com/containerd/containerd/runtime/process.go
deleted file mode 100644
index 903e831..0000000
--- a/vendor/github.com/containerd/containerd/runtime/process.go
+++ /dev/null
@@ -1,502 +0,0 @@
-package runtime
-
-import (
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"strconv"
-	"strings"
-	"sync"
-	"syscall"
-	"time"
-
-	"github.com/containerd/containerd/osutils"
-	"github.com/containerd/containerd/specs"
-	"github.com/sirupsen/logrus"
-	"golang.org/x/sys/unix"
-)
-
-// Process holds the operation allowed on a container's process
-type Process interface {
-	io.Closer
-
-	// ID of the process.
-	// This is either "init" when it is the container's init process or
-	// it is a user provided id for the process similar to the container id
-	ID() string
-	// Start unblocks the associated container init process.
-	// This should only be called on the process with ID "init"
-	Start() error
-	CloseStdin() error
-	Resize(int, int) error
-	// ExitFD returns the fd the provides an event when the process exits
-	ExitFD() int
-	// ExitStatus returns the exit status of the process or an error if it
-	// has not exited
-	ExitStatus() (uint32, error)
-	// Spec returns the process spec that created the process
-	Spec() specs.ProcessSpec
-	// Signal sends the provided signal to the process
-	Signal(os.Signal) error
-	// Container returns the container that the process belongs to
-	Container() Container
-	// Stdio of the container
-	Stdio() Stdio
-	// SystemPid is the pid on the system
-	SystemPid() int
-	// State returns if the process is running or not
-	State() State
-	// Wait reaps the shim process if avaliable
-	Wait()
-}
-
-type processConfig struct {
-	id          string
-	root        string
-	processSpec specs.ProcessSpec
-	spec        *specs.Spec
-	c           *container
-	stdio       Stdio
-	exec        bool
-	checkpoint  string
-}
-
-func newProcess(config *processConfig) (*process, error) {
-	p := &process{
-		root:      config.root,
-		id:        config.id,
-		container: config.c,
-		spec:      config.processSpec,
-		stdio:     config.stdio,
-		cmdDoneCh: make(chan struct{}),
-		state:     Running,
-	}
-	uid, gid, err := getRootIDs(config.spec)
-	if err != nil {
-		return nil, err
-	}
-	f, err := os.Create(filepath.Join(config.root, "process.json"))
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	ps := ProcessState{
-		ProcessSpec: config.processSpec,
-		Exec:        config.exec,
-		PlatformProcessState: PlatformProcessState{
-			Checkpoint: config.checkpoint,
-			RootUID:    uid,
-			RootGID:    gid,
-		},
-		Stdin:       config.stdio.Stdin,
-		Stdout:      config.stdio.Stdout,
-		Stderr:      config.stdio.Stderr,
-		RuntimeArgs: config.c.runtimeArgs,
-		NoPivotRoot: config.c.noPivotRoot,
-	}
-
-	if err := json.NewEncoder(f).Encode(ps); err != nil {
-		return nil, err
-	}
-	exit, err := getExitPipe(filepath.Join(config.root, ExitFile))
-	if err != nil {
-		return nil, err
-	}
-	control, err := getControlPipe(filepath.Join(config.root, ControlFile))
-	if err != nil {
-		return nil, err
-	}
-	p.exitPipe = exit
-	p.controlPipe = control
-	return p, nil
-}
-
-func loadProcess(root, id string, c *container, s *ProcessState) (*process, error) {
-	p := &process{
-		root:      root,
-		id:        id,
-		container: c,
-		spec:      s.ProcessSpec,
-		stdio: Stdio{
-			Stdin:  s.Stdin,
-			Stdout: s.Stdout,
-			Stderr: s.Stderr,
-		},
-		state: Stopped,
-	}
-
-	startTime, err := ioutil.ReadFile(filepath.Join(p.root, StartTimeFile))
-	if err != nil && !os.IsNotExist(err) {
-		return nil, err
-	}
-	p.startTime = string(startTime)
-
-	if _, err := p.getPidFromFile(); err != nil {
-		return nil, err
-	}
-	if _, err := p.ExitStatus(); err != nil {
-		if err == ErrProcessNotExited {
-			exit, err := getExitPipe(filepath.Join(root, ExitFile))
-			if err != nil {
-				return nil, err
-			}
-			p.exitPipe = exit
-
-			control, err := getControlPipe(filepath.Join(root, ControlFile))
-			if err != nil {
-				return nil, err
-			}
-			p.controlPipe = control
-
-			p.state = Running
-			return p, nil
-		}
-		return nil, err
-	}
-	return p, nil
-}
-
-func readProcStatField(pid int, field int) (string, error) {
-	data, err := ioutil.ReadFile(filepath.Join(string(filepath.Separator), "proc", strconv.Itoa(pid), "stat"))
-	if err != nil {
-		return "", err
-	}
-
-	if field > 2 {
-		// First, split out the name since he could contains spaces.
-		parts := strings.Split(string(data), ") ")
-		// Now split out the rest, we end up with 2 fields less
-		parts = strings.Split(parts[1], " ")
-		return parts[field-2-1], nil // field count start at 1 in manual
-	}
-
-	parts := strings.Split(string(data), " (")
-
-	if field == 1 {
-		return parts[0], nil
-	}
-
-	parts = strings.Split(parts[1], ") ")
-	return parts[0], nil
-}
-
-type process struct {
-	root        string
-	id          string
-	pid         int
-	exitPipe    *os.File
-	controlPipe *os.File
-	container   *container
-	spec        specs.ProcessSpec
-	stdio       Stdio
-	cmd         *exec.Cmd
-	cmdSuccess  bool
-	cmdDoneCh   chan struct{}
-	state       State
-	stateLock   sync.Mutex
-	startTime   string
-}
-
-func (p *process) ID() string {
-	return p.id
-}
-
-func (p *process) Container() Container {
-	return p.container
-}
-
-func (p *process) SystemPid() int {
-	return p.pid
-}
-
-// ExitFD returns the fd of the exit pipe
-func (p *process) ExitFD() int {
-	return int(p.exitPipe.Fd())
-}
-
-func (p *process) CloseStdin() error {
-	_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 0, 0, 0)
-	return err
-}
-
-func (p *process) Resize(w, h int) error {
-	_, err := fmt.Fprintf(p.controlPipe, "%d %d %d\n", 1, w, h)
-	return err
-}
-
-func (p *process) updateExitStatusFile(status uint32) (uint32, error) {
-	p.stateLock.Lock()
-	p.state = Stopped
-	p.stateLock.Unlock()
-	err := ioutil.WriteFile(filepath.Join(p.root, ExitStatusFile), []byte(fmt.Sprintf("%d", status)), 0644)
-	return status, err
-}
-
-func (p *process) handleSigkilledShim(rst uint32, rerr error) (uint32, error) {
-	if p.cmd == nil || p.cmd.Process == nil {
-		e := unix.Kill(p.pid, 0)
-		if e == syscall.ESRCH {
-			logrus.Warnf("containerd: %s:%s (pid %d) does not exist", p.container.id, p.id, p.pid)
-			// The process died while containerd was down (probably of
-			// SIGKILL, but no way to be sure)
-			return p.updateExitStatusFile(UnknownStatus)
-		}
-
-		// If it's not the same process, just mark it stopped and set
-		// the status to the UnknownStatus value (i.e. 255)
-		if same, err := p.isSameProcess(); !same {
-			logrus.Warnf("containerd: %s:%s (pid %d) is not the same process anymore (%v)", p.container.id, p.id, p.pid, err)
-			// Create the file so we get the exit event generated once monitor kicks in
-			// without having to go through all this process again
-			return p.updateExitStatusFile(UnknownStatus)
-		}
-
-		ppid, err := readProcStatField(p.pid, 4)
-		if err != nil {
-			return rst, fmt.Errorf("could not check process ppid: %v (%v)", err, rerr)
-		}
-		if ppid == "1" {
-			logrus.Warnf("containerd: %s:%s shim died, killing associated process", p.container.id, p.id)
-			// Before sending SIGKILL to container, we need to make sure
-			// the container is not in Paused state. If the container is
-			// Paused, the container will not response to any signal
-			// we should Resume it after sending SIGKILL
-			var (
-				s    State
-				err1 error
-			)
-			if p.container != nil {
-				s, err1 = p.container.Status()
-			}
-
-			unix.Kill(p.pid, syscall.SIGKILL)
-			if err != nil && err != syscall.ESRCH {
-				return UnknownStatus, fmt.Errorf("containerd: unable to SIGKILL %s:%s (pid %v): %v", p.container.id, p.id, p.pid, err)
-			}
-			if p.container != nil {
-				if err1 == nil && s == Paused {
-					p.container.Resume()
-				}
-			}
-
-			// wait for the process to die
-			for {
-				e := unix.Kill(p.pid, 0)
-				if e == syscall.ESRCH {
-					break
-				}
-				time.Sleep(5 * time.Millisecond)
-			}
-			// Create the file so we get the exit event generated once monitor kicks in
-			// without having to go through all this process again
-			return p.updateExitStatusFile(128 + uint32(syscall.SIGKILL))
-		}
-
-		return rst, rerr
-	}
-
-	// The shim was SIGKILLED
-	// We should get the container state first
-	// to make sure the container is not in
-	// Pause state, if it's Paused, we should resume it
-	// and it will exit immediately because shim will send sigkill to
-	// container when died.
-	s, err1 := p.container.Status()
-	if err1 == nil && s == Paused {
-		p.container.Resume()
-	}
-
-	// Ensure we got the shim ProcessState
-	select {
-	case <-p.cmdDoneCh:
-	case <-time.After(2 * time.Minute):
-		return rst, fmt.Errorf("could not get the shim ProcessState within two minutes")
-	}
-
-	shimStatus := p.cmd.ProcessState.Sys().(syscall.WaitStatus)
-	if shimStatus.Signaled() && shimStatus.Signal() == syscall.SIGKILL {
-		logrus.Debugf("containerd: ExitStatus(container: %s, process: %s): shim was SIGKILL'ed reaping its child with pid %d", p.container.id, p.id, p.pid)
-
-		rerr = nil
-		rst = 128 + uint32(shimStatus.Signal())
-
-		p.stateLock.Lock()
-		p.state = Stopped
-		p.stateLock.Unlock()
-	}
-
-	return rst, rerr
-}
-
-func (p *process) ExitStatus() (rst uint32, rerr error) {
-	data, err := ioutil.ReadFile(filepath.Join(p.root, ExitStatusFile))
-	defer func() {
-		if rerr != nil {
-			rst, rerr = p.handleSigkilledShim(rst, rerr)
-		}
-	}()
-	if err != nil {
-		if os.IsNotExist(err) {
-			return UnknownStatus, ErrProcessNotExited
-		}
-		return UnknownStatus, err
-	}
-	if len(data) == 0 {
-		return UnknownStatus, ErrProcessNotExited
-	}
-	p.stateLock.Lock()
-	p.state = Stopped
-	p.stateLock.Unlock()
-
-	i, err := strconv.ParseUint(string(data), 10, 32)
-	return uint32(i), err
-}
-
-func (p *process) Spec() specs.ProcessSpec {
-	return p.spec
-}
-
-func (p *process) Stdio() Stdio {
-	return p.stdio
-}
-
-// Close closes any open files and/or resouces on the process
-func (p *process) Close() error {
-	err := p.exitPipe.Close()
-	if cerr := p.controlPipe.Close(); err == nil {
-		err = cerr
-	}
-	return err
-}
-
-func (p *process) State() State {
-	p.stateLock.Lock()
-	defer p.stateLock.Unlock()
-	return p.state
-}
-
-func (p *process) readStartTime() (string, error) {
-	return readProcStatField(p.pid, 22)
-}
-
-func (p *process) saveStartTime() error {
-	startTime, err := p.readStartTime()
-	if err != nil {
-		return err
-	}
-
-	p.startTime = startTime
-	return ioutil.WriteFile(filepath.Join(p.root, StartTimeFile), []byte(startTime), 0644)
-}
-
-func (p *process) isSameProcess() (bool, error) {
-	if p.pid == 0 {
-		_, err := p.getPidFromFile()
-		if err != nil {
-			return false, err
-		}
-	}
-
-	// for backward compat assume it's the same if startTime wasn't set
-	if p.startTime == "" {
-		// Sometimes the process dies before we can get the starttime,
-		// check that the process actually exists
-		if err := unix.Kill(p.pid, 0); err != syscall.ESRCH {
-			return true, nil
-		}
-		return false, nil
-	}
-
-	startTime, err := p.readStartTime()
-	if err != nil {
-		return false, err
-	}
-
-	return startTime == p.startTime, nil
-}
-
-// Wait will reap the shim process
-func (p *process) Wait() {
-	if p.cmdDoneCh != nil {
-		<-p.cmdDoneCh
-	}
-}
-
-func getExitPipe(path string) (*os.File, error) {
-	if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
-		return nil, err
-	}
-	// add NONBLOCK in case the other side has already closed or else
-	// this function would never return
-	return os.OpenFile(path, syscall.O_RDONLY|syscall.O_NONBLOCK, 0)
-}
-
-func getControlPipe(path string) (*os.File, error) {
-	if err := unix.Mkfifo(path, 0755); err != nil && !os.IsExist(err) {
-		return nil, err
-	}
-	return os.OpenFile(path, syscall.O_RDWR|syscall.O_NONBLOCK, 0)
-}
-
-// Signal sends the provided signal to the process
-func (p *process) Signal(s os.Signal) error {
-	return syscall.Kill(p.pid, s.(syscall.Signal))
-}
-
-// Start unblocks the associated container init process.
-// This should only be called on the process with ID "init"
-func (p *process) Start() error {
-	if p.ID() == InitProcessID {
-		var (
-			errC = make(chan error, 1)
-			args = append(p.container.runtimeArgs, "start", p.container.id)
-			cmd  = exec.Command(p.container.runtime, args...)
-		)
-		go func() {
-			out, err := cmd.CombinedOutput()
-			if err != nil {
-				errC <- fmt.Errorf("%s: %q", err.Error(), out)
-			}
-			errC <- nil
-		}()
-		select {
-		case err := <-errC:
-			if err != nil {
-				return err
-			}
-		case <-p.cmdDoneCh:
-			if !p.cmdSuccess {
-				if cmd.Process != nil {
-					cmd.Process.Kill()
-				}
-				cmd.Wait()
-				return ErrShimExited
-			}
-			err := <-errC
-			if err != nil {
-				return err
-			}
-		}
-	}
-	return nil
-}
-
-// Delete delete any resources held by the container
-func (p *process) Delete() error {
-	var (
-		args = append(p.container.runtimeArgs, "delete", "-f", p.container.id)
-		cmd  = exec.Command(p.container.runtime, args...)
-	)
-
-	cmd.SysProcAttr = osutils.SetPDeathSig()
-	out, err := cmd.CombinedOutput()
-	if err != nil {
-		return fmt.Errorf("%s: %v", out, err)
-	}
-	return nil
-}
diff --git a/vendor/github.com/containerd/containerd/runtime/process_linux.go b/vendor/github.com/containerd/containerd/runtime/process_linux.go
deleted file mode 100644
index d14c4d8..0000000
--- a/vendor/github.com/containerd/containerd/runtime/process_linux.go
+++ /dev/null
@@ -1,22 +0,0 @@
-// +build linux
-
-package runtime
-
-import (
-	"io/ioutil"
-	"path/filepath"
-	"strconv"
-)
-
-func (p *process) getPidFromFile() (int, error) {
-	data, err := ioutil.ReadFile(filepath.Join(p.root, "pid"))
-	if err != nil {
-		return -1, err
-	}
-	i, err := strconv.Atoi(string(data))
-	if err != nil {
-		return -1, errInvalidPidInt
-	}
-	p.pid = i
-	return i, nil
-}
diff --git a/vendor/github.com/containerd/containerd/runtime/process_solaris.go b/vendor/github.com/containerd/containerd/runtime/process_solaris.go
deleted file mode 100644
index 8159f30..0000000
--- a/vendor/github.com/containerd/containerd/runtime/process_solaris.go
+++ /dev/null
@@ -1,34 +0,0 @@
-// +build solaris
-
-package runtime
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"os/exec"
-
-	runtimespec "github.com/opencontainers/runtime-spec/specs-go"
-)
-
-// On Solaris we already have a state file maintained by the framework.
-// This is read by runz state. We just call that instead of maintaining
-// a separate file.
-func (p *process) getPidFromFile() (int, error) {
-	//we get this information from runz state
-	cmd := exec.Command("runc", "state", p.container.ID())
-	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
-	cmd.Stdout, cmd.Stderr = outBuf, errBuf
-
-	if err := cmd.Run(); err != nil {
-		// TODO: Improve logic
-		return -1, errContainerNotFound
-	}
-	response := runtimespec.State{}
-	decoder := json.NewDecoder(outBuf)
-	if err := decoder.Decode(&response); err != nil {
-		return -1, fmt.Errorf("unable to decode json response: %+v", err)
-	}
-	p.pid = response.Pid
-	return p.pid, nil
-}
diff --git a/vendor/github.com/containerd/containerd/runtime/runtime.go b/vendor/github.com/containerd/containerd/runtime/runtime.go
index f702487..39ffe86 100644
--- a/vendor/github.com/containerd/containerd/runtime/runtime.go
+++ b/vendor/github.com/containerd/containerd/runtime/runtime.go
@@ -1,134 +1,54 @@
 package runtime
 
 import (
-	"errors"
+	"context"
 	"time"
 
-	"github.com/containerd/containerd/specs"
+	"github.com/containerd/containerd/mount"
+	"github.com/gogo/protobuf/types"
 )
 
-var (
-	// ErrContainerExited is returned when access to an exited
-	// container is attempted
-	ErrContainerExited = errors.New("containerd: container has exited")
-	// ErrProcessNotExited is returned when trying to retrieve the exit
-	// status of an alive process
-	ErrProcessNotExited = errors.New("containerd: process has not exited")
-	// ErrContainerNotStarted is returned when a container fails to
-	// start without error from the shim or the OCI runtime
-	ErrContainerNotStarted = errors.New("containerd: container not started")
-	// ErrContainerStartTimeout is returned if a container takes too
-	// long to start
-	ErrContainerStartTimeout = errors.New("containerd: container did not start before the specified timeout")
-	// ErrShimExited is returned if the shim or the contianer's init process
-	// exits before completing
-	ErrShimExited = errors.New("containerd: shim exited before container process was started")
-
-	errNoPidFile         = errors.New("containerd: no process pid file found")
-	errInvalidPidInt     = errors.New("containerd: process pid is invalid")
-	errContainerNotFound = errors.New("containerd: container not found")
-	errNotImplemented    = errors.New("containerd: not implemented")
-)
-
-const (
-	// ExitFile holds the name of the pipe used to monitor process
-	// exit
-	ExitFile = "exit"
-	// ExitStatusFile holds the name of the file where the container
-	// exit code is to be written
-	ExitStatusFile = "exitStatus"
-	// StateFile holds the name of the file where the container state
-	// is written
-	StateFile = "state.json"
-	// ControlFile holds the name of the pipe used to control the shim
-	ControlFile = "control"
-	// InitProcessID holds the special ID used for the very first
-	// container's process
-	InitProcessID = "init"
-	// StartTimeFile holds the name of the file in which the process
-	// start time is saved
-	StartTimeFile = "starttime"
-
-	// UnknownStatus is the value returned when a process exit
-	// status cannot be determined
-	UnknownStatus = 255
-)
-
-// Checkpoint holds information regarding a container checkpoint
-type Checkpoint struct {
-	// Timestamp is the time that checkpoint happened
-	Created time.Time `json:"created"`
-	// Name is the name of the checkpoint
-	Name string `json:"name"`
-	// TCP checkpoints open tcp connections
-	TCP bool `json:"tcp"`
-	// UnixSockets persists unix sockets in the checkpoint
-	UnixSockets bool `json:"unixSockets"`
-	// Shell persists tty sessions in the checkpoint
-	Shell bool `json:"shell"`
-	// Exit exits the container after the checkpoint is finished
-	Exit bool `json:"exit"`
-	// EmptyNS tells CRIU to omit a specified namespace
-	EmptyNS []string `json:"emptyNS,omitempty"`
+// IO holds process IO information
+type IO struct {
+	Stdin    string
+	Stdout   string
+	Stderr   string
+	Terminal bool
 }
 
-// PlatformProcessState container platform-specific fields in the ProcessState structure
-type PlatformProcessState struct {
-	Checkpoint string `json:"checkpoint"`
-	RootUID    int    `json:"rootUID"`
-	RootGID    int    `json:"rootGID"`
+// CreateOpts contains task creation data
+type CreateOpts struct {
+	// Spec is the OCI runtime spec
+	Spec *types.Any
+	// Rootfs mounts to perform to gain access to the container's filesystem
+	Rootfs []mount.Mount
+	// IO for the container's main process
+	IO IO
+	// Checkpoint digest to restore container state
+	Checkpoint string
+	// Options for the runtime and container
+	Options *types.Any
 }
 
-// State represents a container state
-type State string
-
-// Resource regroups the various container limits that can be updated
-type Resource struct {
-	CPUShares          int64
-	BlkioWeight        uint16
-	CPUPeriod          int64
-	CPUQuota           int64
-	CpusetCpus         string
-	CpusetMems         string
-	KernelMemory       int64
-	KernelTCPMemory    int64
-	Memory             int64
-	MemoryReservation  int64
-	MemorySwap         int64
-	PidsLimit          int64
-	CPURealtimePeriod  uint64
-	CPURealtimdRuntime int64
+// Exit information for a process
+type Exit struct {
+	Pid       uint32
+	Status    uint32
+	Timestamp time.Time
 }
 
-// Possible container states
-const (
-	Paused  = State("paused")
-	Stopped = State("stopped")
-	Running = State("running")
-)
-
-type state struct {
-	Bundle      string   `json:"bundle"`
-	Labels      []string `json:"labels"`
-	Stdin       string   `json:"stdin"`
-	Stdout      string   `json:"stdout"`
-	Stderr      string   `json:"stderr"`
-	Runtime     string   `json:"runtime"`
-	RuntimeArgs []string `json:"runtimeArgs"`
-	Shim        string   `json:"shim"`
-	NoPivotRoot bool     `json:"noPivotRoot"`
-}
-
-// ProcessState holds the process OCI specs along with various fields
-// required by containerd
-type ProcessState struct {
-	specs.ProcessSpec
-	Exec        bool     `json:"exec"`
-	Stdin       string   `json:"containerdStdin"`
-	Stdout      string   `json:"containerdStdout"`
-	Stderr      string   `json:"containerdStderr"`
-	RuntimeArgs []string `json:"runtimeArgs"`
-	NoPivotRoot bool     `json:"noPivotRoot"`
-
-	PlatformProcessState
+// Runtime is responsible for the creation of containers for a certain platform,
+// arch, or custom usage.
+type Runtime interface {
+	// ID of the runtime
+	ID() string
+	// Create creates a task with the provided id and options.
+	Create(ctx context.Context, id string, opts CreateOpts) (Task, error)
+	// Get returns a task.
+	Get(context.Context, string) (Task, error)
+	// Tasks returns all the current tasks for the runtime.
+	// Any container runs at most one task at a time.
+	Tasks(context.Context) ([]Task, error)
+	// Delete removes the task in the runtime.
+	Delete(context.Context, Task) (*Exit, error)
 }
diff --git a/vendor/github.com/containerd/containerd/runtime/stats.go b/vendor/github.com/containerd/containerd/runtime/stats.go
deleted file mode 100644
index 4cfa9e9..0000000
--- a/vendor/github.com/containerd/containerd/runtime/stats.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package runtime
-
-import "time"
-
-// Stat holds a container statistics
-type Stat struct {
-	// Timestamp is the time that the statistics where collected
-	Timestamp time.Time
-	CPU       CPU                `json:"cpu"`
-	Memory    Memory             `json:"memory"`
-	Pids      Pids               `json:"pids"`
-	Blkio     Blkio              `json:"blkio"`
-	Hugetlb   map[string]Hugetlb `json:"hugetlb"`
-}
-
-// Hugetlb holds information regarding a container huge tlb usage
-type Hugetlb struct {
-	Usage   uint64 `json:"usage,omitempty"`
-	Max     uint64 `json:"max,omitempty"`
-	Failcnt uint64 `json:"failcnt"`
-}
-
-// BlkioEntry represents a single record for a Blkio stat
-type BlkioEntry struct {
-	Major uint64 `json:"major,omitempty"`
-	Minor uint64 `json:"minor,omitempty"`
-	Op    string `json:"op,omitempty"`
-	Value uint64 `json:"value,omitempty"`
-}
-
-// Blkio regroups all the Blkio related stats
-type Blkio struct {
-	IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"`
-	IoServicedRecursive     []BlkioEntry `json:"ioServicedRecursive,omitempty"`
-	IoQueuedRecursive       []BlkioEntry `json:"ioQueueRecursive,omitempty"`
-	IoServiceTimeRecursive  []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"`
-	IoWaitTimeRecursive     []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"`
-	IoMergedRecursive       []BlkioEntry `json:"ioMergedRecursive,omitempty"`
-	IoTimeRecursive         []BlkioEntry `json:"ioTimeRecursive,omitempty"`
-	SectorsRecursive        []BlkioEntry `json:"sectorsRecursive,omitempty"`
-}
-
-// Pids holds the stat of the pid usage of the machine
-type Pids struct {
-	Current uint64 `json:"current,omitempty"`
-	Limit   uint64 `json:"limit,omitempty"`
-}
-
-// Throttling holds a cpu throttling information
-type Throttling struct {
-	Periods          uint64 `json:"periods,omitempty"`
-	ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"`
-	ThrottledTime    uint64 `json:"throttledTime,omitempty"`
-}
-
-// CPUUsage holds information regarding cpu usage
-type CPUUsage struct {
-	// Units: nanoseconds.
-	Total  uint64   `json:"total,omitempty"`
-	Percpu []uint64 `json:"percpu,omitempty"`
-	Kernel uint64   `json:"kernel"`
-	User   uint64   `json:"user"`
-}
-
-// CPU regroups both a CPU usage and throttling information
-type CPU struct {
-	Usage      CPUUsage   `json:"usage,omitempty"`
-	Throttling Throttling `json:"throttling,omitempty"`
-}
-
-// MemoryEntry regroups statistic about a given type of memory
-type MemoryEntry struct {
-	Limit   uint64 `json:"limit"`
-	Usage   uint64 `json:"usage,omitempty"`
-	Max     uint64 `json:"max,omitempty"`
-	Failcnt uint64 `json:"failcnt"`
-}
-
-// Memory holds information regarding the different type of memories available
-type Memory struct {
-	Cache     uint64            `json:"cache,omitempty"`
-	Usage     MemoryEntry       `json:"usage,omitempty"`
-	Swap      MemoryEntry       `json:"swap,omitempty"`
-	Kernel    MemoryEntry       `json:"kernel,omitempty"`
-	KernelTCP MemoryEntry       `json:"kernelTCP,omitempty"`
-	Raw       map[string]uint64 `json:"raw,omitempty"`
-}
diff --git a/vendor/github.com/containerd/containerd/runtime/task.go b/vendor/github.com/containerd/containerd/runtime/task.go
new file mode 100644
index 0000000..019d4c8
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/task.go
@@ -0,0 +1,102 @@
+package runtime
+
+import (
+	"context"
+	"time"
+
+	"github.com/gogo/protobuf/types"
+)
+
+type TaskInfo struct {
+	ID        string
+	Runtime   string
+	Spec      []byte
+	Namespace string
+}
+
+type Process interface {
+	ID() string
+	// State returns the process state
+	State(context.Context) (State, error)
+	// Kill signals a container
+	Kill(context.Context, uint32, bool) error
+	// Pty resizes the processes pty/console
+	ResizePty(context.Context, ConsoleSize) error
+	// CloseStdin closes the processes stdin
+	CloseIO(context.Context) error
+	// Start the container's user defined process
+	Start(context.Context) error
+	// Wait for the process to exit
+	Wait(context.Context) (*Exit, error)
+}
+
+type Task interface {
+	Process
+
+	// Information of the container
+	Info() TaskInfo
+	// Pause pauses the container process
+	Pause(context.Context) error
+	// Resume unpauses the container process
+	Resume(context.Context) error
+	// Exec adds a process into the container
+	Exec(context.Context, string, ExecOpts) (Process, error)
+	// Pids returns all pids
+	Pids(context.Context) ([]ProcessInfo, error)
+	// Checkpoint checkpoints a container to an image with live system data
+	Checkpoint(context.Context, string, *types.Any) error
+	// DeleteProcess deletes a specific exec process via its id
+	DeleteProcess(context.Context, string) (*Exit, error)
+	// Update sets the provided resources to a running task
+	Update(context.Context, *types.Any) error
+	// Process returns a process within the task for the provided id
+	Process(context.Context, string) (Process, error)
+	// Metrics returns runtime specific metrics for a task
+	Metrics(context.Context) (interface{}, error)
+}
+
+type ExecOpts struct {
+	Spec *types.Any
+	IO   IO
+}
+
+type ConsoleSize struct {
+	Width  uint32
+	Height uint32
+}
+
+type Status int
+
+const (
+	CreatedStatus Status = iota + 1
+	RunningStatus
+	StoppedStatus
+	DeletedStatus
+	PausedStatus
+	PausingStatus
+)
+
+type State struct {
+	// Status is the current status of the container
+	Status Status
+	// Pid is the main process id for the container
+	Pid uint32
+	// ExitStatus of the process
+	// Only valid if the Status is Stopped
+	ExitStatus uint32
+	// ExitedAt is the time at which the process exited
+	// Only valid if the Status is Stopped
+	ExitedAt time.Time
+	Stdin    string
+	Stdout   string
+	Stderr   string
+	Terminal bool
+}
+
+type ProcessInfo struct {
+	// Pid is the process ID
+	Pid uint32
+	// Info includes additional process information
+	// Info varies by platform
+	Info interface{}
+}
diff --git a/vendor/github.com/containerd/containerd/runtime/task_list.go b/vendor/github.com/containerd/containerd/runtime/task_list.go
new file mode 100644
index 0000000..12062ce
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/task_list.go
@@ -0,0 +1,95 @@
+package runtime
+
+import (
+	"context"
+	"sync"
+
+	"github.com/containerd/containerd/namespaces"
+	"github.com/pkg/errors"
+)
+
+var (
+	ErrTaskNotExists     = errors.New("task does not exist")
+	ErrTaskAlreadyExists = errors.New("task already exists")
+)
+
+func NewTaskList() *TaskList {
+	return &TaskList{
+		tasks: make(map[string]map[string]Task),
+	}
+}
+
+type TaskList struct {
+	mu    sync.Mutex
+	tasks map[string]map[string]Task
+}
+
+func (l *TaskList) Get(ctx context.Context, id string) (Task, error) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+	tasks, ok := l.tasks[namespace]
+	if !ok {
+		return nil, ErrTaskNotExists
+	}
+	t, ok := tasks[id]
+	if !ok {
+		return nil, ErrTaskNotExists
+	}
+	return t, nil
+}
+
+func (l *TaskList) GetAll(ctx context.Context) ([]Task, error) {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+	var o []Task
+	tasks, ok := l.tasks[namespace]
+	if !ok {
+		return o, nil
+	}
+	for _, t := range tasks {
+		o = append(o, t)
+	}
+	return o, nil
+}
+
+func (l *TaskList) Add(ctx context.Context, t Task) error {
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return err
+	}
+	return l.AddWithNamespace(namespace, t)
+}
+
+func (l *TaskList) AddWithNamespace(namespace string, t Task) error {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+
+	id := t.ID()
+	if _, ok := l.tasks[namespace]; !ok {
+		l.tasks[namespace] = make(map[string]Task)
+	}
+	if _, ok := l.tasks[namespace][id]; ok {
+		return errors.Wrap(ErrTaskAlreadyExists, id)
+	}
+	l.tasks[namespace][id] = t
+	return nil
+}
+
+func (l *TaskList) Delete(ctx context.Context, t Task) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	namespace, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return
+	}
+	tasks, ok := l.tasks[namespace]
+	if ok {
+		delete(tasks, t.ID())
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/runtime/typeurl.go b/vendor/github.com/containerd/containerd/runtime/typeurl.go
new file mode 100644
index 0000000..8ba2b43
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/runtime/typeurl.go
@@ -0,0 +1,18 @@
+package runtime
+
+import (
+	"strconv"
+
+	"github.com/containerd/typeurl"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func init() {
+	const prefix = "types.containerd.io"
+	// register TypeUrls for commonly marshaled external types
+	major := strconv.Itoa(specs.VersionMajor)
+	typeurl.Register(&specs.Spec{}, prefix, "opencontainers/runtime-spec", major, "Spec")
+	typeurl.Register(&specs.Process{}, prefix, "opencontainers/runtime-spec", major, "Process")
+	typeurl.Register(&specs.LinuxResources{}, prefix, "opencontainers/runtime-spec", major, "LinuxResources")
+	typeurl.Register(&specs.WindowsResources{}, prefix, "opencontainers/runtime-spec", major, "WindowsResources")
+}
diff --git a/vendor/github.com/containerd/containerd/server/config.go b/vendor/github.com/containerd/containerd/server/config.go
new file mode 100644
index 0000000..764f6bd
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/server/config.go
@@ -0,0 +1,90 @@
+package server
+
+import (
+	"bytes"
+	"io"
+
+	"github.com/BurntSushi/toml"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/pkg/errors"
+)
+
+// Config provides containerd configuration data for the server
+type Config struct {
+	// Root is the path to a directory where containerd will store persistent data
+	Root string `toml:"root"`
+	// State is the path to a directory where containerd will store transient data
+	State string `toml:"state"`
+	// GRPC configuration settings
+	GRPC GRPCConfig `toml:"grpc"`
+	// Debug and profiling settings
+	Debug Debug `toml:"debug"`
+	// Metrics and monitoring settings
+	Metrics MetricsConfig `toml:"metrics"`
+	// Plugins provides plugin specific configuration for the initialization of a plugin
+	Plugins map[string]toml.Primitive `toml:"plugins"`
+	// Enable containerd as a subreaper
+	Subreaper bool `toml:"subreaper"`
+	// OOMScore adjust the containerd's oom score
+	OOMScore int `toml:"oom_score"`
+	// Cgroup specifies cgroup information for the containerd daemon process
+	Cgroup CgroupConfig `toml:"cgroup"`
+
+	md toml.MetaData
+}
+
+type GRPCConfig struct {
+	Address string `toml:"address"`
+	Uid     int    `toml:"uid"`
+	Gid     int    `toml:"gid"`
+}
+
+type Debug struct {
+	Address string `toml:"address"`
+	Uid     int    `toml:"uid"`
+	Gid     int    `toml:"gid"`
+	Level   string `toml:"level"`
+}
+
+type MetricsConfig struct {
+	Address string `toml:"address"`
+}
+
+type CgroupConfig struct {
+	Path string `toml:"path"`
+}
+
+// Decode unmarshals a plugin specific configuration by plugin id
+func (c *Config) Decode(id string, v interface{}) (interface{}, error) {
+	data, ok := c.Plugins[id]
+	if !ok {
+		return v, nil
+	}
+	if err := c.md.PrimitiveDecode(data, v); err != nil {
+		return nil, err
+	}
+	return v, nil
+}
+
+// WriteTo marshals the config to the provided writer
+func (c *Config) WriteTo(w io.Writer) (int64, error) {
+	buf := bytes.NewBuffer(nil)
+	e := toml.NewEncoder(buf)
+	if err := e.Encode(c); err != nil {
+		return 0, err
+	}
+	return io.Copy(w, buf)
+}
+
+// LoadConfig loads the containerd server config from the provided path
+func LoadConfig(path string, v *Config) error {
+	if v == nil {
+		return errors.Wrapf(errdefs.ErrInvalidArgument, "argument v must not be nil")
+	}
+	md, err := toml.DecodeFile(path, v)
+	if err != nil {
+		return err
+	}
+	v.md = md
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/server/server.go b/vendor/github.com/containerd/containerd/server/server.go
new file mode 100644
index 0000000..d1a58e9
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/server/server.go
@@ -0,0 +1,272 @@
+package server
+
+import (
+	"expvar"
+	"net"
+	"net/http"
+	"net/http/pprof"
+	"os"
+	"path/filepath"
+	"strings"
+
+	"github.com/boltdb/bolt"
+	containers "github.com/containerd/containerd/api/services/containers/v1"
+	contentapi "github.com/containerd/containerd/api/services/content/v1"
+	diff "github.com/containerd/containerd/api/services/diff/v1"
+	eventsapi "github.com/containerd/containerd/api/services/events/v1"
+	images "github.com/containerd/containerd/api/services/images/v1"
+	introspection "github.com/containerd/containerd/api/services/introspection/v1"
+	namespaces "github.com/containerd/containerd/api/services/namespaces/v1"
+	snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
+	tasks "github.com/containerd/containerd/api/services/tasks/v1"
+	version "github.com/containerd/containerd/api/services/version/v1"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/content/local"
+	"github.com/containerd/containerd/events"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/metadata"
+	"github.com/containerd/containerd/plugin"
+	"github.com/containerd/containerd/snapshot"
+	metrics "github.com/docker/go-metrics"
+	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/health/grpc_health_v1"
+)
+
+// New creates and initializes a new containerd server
+func New(ctx context.Context, config *Config) (*Server, error) {
+	if config.Root == "" {
+		return nil, errors.New("root must be specified")
+	}
+	if config.State == "" {
+		return nil, errors.New("state must be specified")
+	}
+	if err := os.MkdirAll(config.Root, 0711); err != nil {
+		return nil, err
+	}
+	if err := os.MkdirAll(config.State, 0711); err != nil {
+		return nil, err
+	}
+	if err := apply(ctx, config); err != nil {
+		return nil, err
+	}
+	plugins, err := loadPlugins(config)
+	if err != nil {
+		return nil, err
+	}
+	rpc := grpc.NewServer(
+		grpc.UnaryInterceptor(interceptor),
+		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
+	)
+	var (
+		services []plugin.Service
+		s        = &Server{
+			rpc:    rpc,
+			events: events.NewExchange(),
+		}
+		initialized = plugin.NewPluginSet()
+	)
+	for _, p := range plugins {
+		id := p.URI()
+		log.G(ctx).WithField("type", p.Type).Infof("loading plugin %q...", id)
+
+		initContext := plugin.NewContext(
+			ctx,
+			p,
+			initialized,
+			config.Root,
+			config.State,
+		)
+		initContext.Events = s.events
+		initContext.Address = config.GRPC.Address
+
+		// load the plugin specific configuration if it is provided
+		if p.Config != nil {
+			pluginConfig, err := config.Decode(p.ID, p.Config)
+			if err != nil {
+				return nil, err
+			}
+			initContext.Config = pluginConfig
+		}
+		result := p.Init(initContext)
+		if err := initialized.Add(result); err != nil {
+			return nil, errors.Wrapf(err, "could not add plugin result to plugin set")
+		}
+
+		instance, err := result.Instance()
+		if err != nil {
+			if plugin.IsSkipPlugin(err) {
+				log.G(ctx).WithField("type", p.Type).Infof("skip loading plugin %q...", id)
+			} else {
+				log.G(ctx).WithError(err).Warnf("failed to load plugin %s", id)
+			}
+			continue
+		}
+		// check for grpc services that should be registered with the server
+		if service, ok := instance.(plugin.Service); ok {
+			services = append(services, service)
+		}
+	}
+	// register services after all plugins have been initialized
+	for _, service := range services {
+		if err := service.Register(rpc); err != nil {
+			return nil, err
+		}
+	}
+	return s, nil
+}
+
+// Server is the containerd main daemon
+type Server struct {
+	rpc    *grpc.Server
+	events *events.Exchange
+}
+
+// ServeGRPC provides the containerd grpc APIs on the provided listener
+func (s *Server) ServeGRPC(l net.Listener) error {
+	// before we start serving the grpc API regster the grpc_prometheus metrics
+	// handler.  This needs to be the last service registered so that it can collect
+	// metrics for every other service
+	grpc_prometheus.Register(s.rpc)
+	return trapClosedConnErr(s.rpc.Serve(l))
+}
+
+// ServeMetrics provides a prometheus endpoint for exposing metrics
+func (s *Server) ServeMetrics(l net.Listener) error {
+	m := http.NewServeMux()
+	m.Handle("/v1/metrics", metrics.Handler())
+	return trapClosedConnErr(http.Serve(l, m))
+}
+
+// ServeDebug provides a debug endpoint
+func (s *Server) ServeDebug(l net.Listener) error {
+	// don't use the default http server mux to make sure nothing gets registered
+	// that we don't want to expose via containerd
+	m := http.NewServeMux()
+	m.Handle("/debug/vars", expvar.Handler())
+	m.Handle("/debug/pprof/", http.HandlerFunc(pprof.Index))
+	m.Handle("/debug/pprof/cmdline", http.HandlerFunc(pprof.Cmdline))
+	m.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
+	m.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
+	m.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
+	return trapClosedConnErr(http.Serve(l, m))
+}
+
+// Stop the containerd server canceling any open connections
+func (s *Server) Stop() {
+	s.rpc.Stop()
+}
+
+func loadPlugins(config *Config) ([]*plugin.Registration, error) {
+	// load all plugins into containerd
+	if err := plugin.Load(filepath.Join(config.Root, "plugins")); err != nil {
+		return nil, err
+	}
+	// load additional plugins that don't automatically register themselves
+	plugin.Register(&plugin.Registration{
+		Type: plugin.ContentPlugin,
+		ID:   "content",
+		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+			ic.Meta.Exports["root"] = ic.Root
+			return local.NewStore(ic.Root)
+		},
+	})
+	plugin.Register(&plugin.Registration{
+		Type: plugin.MetadataPlugin,
+		ID:   "bolt",
+		Requires: []plugin.Type{
+			plugin.ContentPlugin,
+			plugin.SnapshotPlugin,
+		},
+		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+			if err := os.MkdirAll(ic.Root, 0711); err != nil {
+				return nil, err
+			}
+			cs, err := ic.Get(plugin.ContentPlugin)
+			if err != nil {
+				return nil, err
+			}
+
+			snapshottersRaw, err := ic.GetByType(plugin.SnapshotPlugin)
+			if err != nil {
+				return nil, err
+			}
+
+			snapshotters := make(map[string]snapshot.Snapshotter)
+			for name, sn := range snapshottersRaw {
+				sn, err := sn.Instance()
+				if err != nil {
+					log.G(ic.Context).WithError(err).
+						Warnf("could not use snapshotter %v in metadata plugin", name)
+					continue
+				}
+				snapshotters[name] = sn.(snapshot.Snapshotter)
+			}
+
+			path := filepath.Join(ic.Root, "meta.db")
+			ic.Meta.Exports["path"] = path
+
+			db, err := bolt.Open(path, 0644, nil)
+			if err != nil {
+				return nil, err
+			}
+			mdb := metadata.NewDB(db, cs.(content.Store), snapshotters)
+			if err := mdb.Init(ic.Context); err != nil {
+				return nil, err
+			}
+			return mdb, nil
+		},
+	})
+
+	// return the ordered graph for plugins
+	return plugin.Graph(), nil
+}
+
+func interceptor(
+	ctx context.Context,
+	req interface{},
+	info *grpc.UnaryServerInfo,
+	handler grpc.UnaryHandler,
+) (interface{}, error) {
+	ctx = log.WithModule(ctx, "containerd")
+	switch info.Server.(type) {
+	case tasks.TasksServer:
+		ctx = log.WithModule(ctx, "tasks")
+	case containers.ContainersServer:
+		ctx = log.WithModule(ctx, "containers")
+	case contentapi.ContentServer:
+		ctx = log.WithModule(ctx, "content")
+	case images.ImagesServer:
+		ctx = log.WithModule(ctx, "images")
+	case grpc_health_v1.HealthServer:
+		// No need to change the context
+	case version.VersionServer:
+		ctx = log.WithModule(ctx, "version")
+	case snapshotapi.SnapshotsServer:
+		ctx = log.WithModule(ctx, "snapshot")
+	case diff.DiffServer:
+		ctx = log.WithModule(ctx, "diff")
+	case namespaces.NamespacesServer:
+		ctx = log.WithModule(ctx, "namespaces")
+	case eventsapi.EventsServer:
+		ctx = log.WithModule(ctx, "events")
+	case introspection.IntrospectionServer:
+		ctx = log.WithModule(ctx, "introspection")
+	default:
+		log.G(ctx).Warnf("unknown GRPC server type: %#v\n", info.Server)
+	}
+	return grpc_prometheus.UnaryServerInterceptor(ctx, req, info, handler)
+}
+
+func trapClosedConnErr(err error) error {
+	if err == nil {
+		return nil
+	}
+	if strings.Contains(err.Error(), "use of closed network connection") {
+		return nil
+	}
+	return err
+}
diff --git a/vendor/github.com/containerd/containerd/server/server_linux.go b/vendor/github.com/containerd/containerd/server/server_linux.go
new file mode 100644
index 0000000..b2f5e8b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/server/server_linux.go
@@ -0,0 +1,57 @@
+package server
+
+import (
+	"context"
+	"os"
+
+	"github.com/containerd/cgroups"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/sys"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const (
+	// DefaultRootDir is the default location used by containerd to store
+	// persistent data
+	DefaultRootDir = "/var/lib/containerd"
+	// DefaultStateDir is the default location used by containerd to store
+	// transient data
+	DefaultStateDir = "/run/containerd"
+	// DefaultAddress is the default unix socket address
+	DefaultAddress = "/run/containerd/containerd.sock"
+	// DefaultDebugAddress is the default unix socket address for pprof data
+	DefaultDebugAddress = "/run/containerd/debug.sock"
+)
+
+// apply sets config settings on the server process
+func apply(ctx context.Context, config *Config) error {
+	if config.Subreaper {
+		log.G(ctx).Info("setting subreaper...")
+		if err := sys.SetSubreaper(1); err != nil {
+			return err
+		}
+	}
+	if config.OOMScore != 0 {
+		log.G(ctx).Infof("changing OOM score to %d", config.OOMScore)
+		if err := sys.SetOOMScore(os.Getpid(), config.OOMScore); err != nil {
+			return err
+		}
+	}
+	if config.Cgroup.Path != "" {
+		cg, err := cgroups.Load(cgroups.V1, cgroups.StaticPath(config.Cgroup.Path))
+		if err != nil {
+			if err != cgroups.ErrCgroupDeleted {
+				return err
+			}
+			if cg, err = cgroups.New(cgroups.V1, cgroups.StaticPath(config.Cgroup.Path), &specs.LinuxResources{}); err != nil {
+				return err
+			}
+		}
+		if err := cg.Add(cgroups.Process{
+			Pid: os.Getpid(),
+		}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/server/server_solaris.go b/vendor/github.com/containerd/containerd/server/server_solaris.go
new file mode 100644
index 0000000..71e1c09
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/server/server_solaris.go
@@ -0,0 +1,14 @@
+package server
+
+import "context"
+
+const (
+	// DefaultAddress is the default unix socket address
+	DefaultAddress = "/var/run/containerd/containerd.sock"
+	// DefaultDebugAddress is the default unix socket address for pprof data
+	DefaultDebugAddress = "/var/run/containerd/debug.sock"
+)
+
+func apply(_ context.Context, _ *Config) error {
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/server/server_unsupported.go b/vendor/github.com/containerd/containerd/server/server_unsupported.go
new file mode 100644
index 0000000..f820e3f
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/server/server_unsupported.go
@@ -0,0 +1,22 @@
+// +build !linux,!windows,!solaris
+
+package server
+
+import "context"
+
+const (
+	// DefaultRootDir is the default location used by containerd to store
+	// persistent data
+	DefaultRootDir = "/var/lib/containerd"
+	// DefaultStateDir is the default location used by containerd to store
+	// transient data
+	DefaultStateDir = "/run/containerd"
+	// DefaultAddress is the default unix socket address
+	DefaultAddress = "/run/containerd/containerd.sock"
+	// DefaultDebugAddress is the default unix socket address for pprof data
+	DefaultDebugAddress = "/run/containerd/debug.sock"
+)
+
+func apply(_ context.Context, _ *Config) error {
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/server/server_windows.go b/vendor/github.com/containerd/containerd/server/server_windows.go
new file mode 100644
index 0000000..b35e776
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/server/server_windows.go
@@ -0,0 +1,29 @@
+// +build windows
+
+package server
+
+import (
+	"context"
+	"os"
+	"path/filepath"
+)
+
+var (
+	// DefaultRootDir is the default location used by containerd to store
+	// persistent data
+	DefaultRootDir = filepath.Join(os.Getenv("programfiles"), "containerd", "root")
+	// DefaultStateDir is the default location used by containerd to store
+	// transient data
+	DefaultStateDir = filepath.Join(os.Getenv("programfiles"), "containerd", "state")
+)
+
+const (
+	// DefaultAddress is the default winpipe address
+	DefaultAddress = `\\.\pipe\containerd-containerd`
+	// DefaultDebugAddress is the default winpipe address for pprof data
+	DefaultDebugAddress = `\\.\pipe\containerd-debug`
+)
+
+func apply(_ context.Context, _ *Config) error {
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/services/content/reader.go b/vendor/github.com/containerd/containerd/services/content/reader.go
new file mode 100644
index 0000000..a8cc554
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/content/reader.go
@@ -0,0 +1,49 @@
+package content
+
+import (
+	"context"
+
+	contentapi "github.com/containerd/containerd/api/services/content/v1"
+	digest "github.com/opencontainers/go-digest"
+)
+
+type remoteReaderAt struct {
+	ctx    context.Context
+	digest digest.Digest
+	size   int64
+	client contentapi.ContentClient
+}
+
+func (ra *remoteReaderAt) Size() int64 {
+	return ra.size
+}
+
+func (ra *remoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
+	rr := &contentapi.ReadContentRequest{
+		Digest: ra.digest,
+		Offset: off,
+		Size_:  int64(len(p)),
+	}
+	rc, err := ra.client.Read(ra.ctx, rr)
+	if err != nil {
+		return 0, err
+	}
+
+	for len(p) > 0 {
+		var resp *contentapi.ReadContentResponse
+		// fill our buffer up until we can fill p.
+		resp, err = rc.Recv()
+		if err != nil {
+			return n, err
+		}
+
+		copied := copy(p, resp.Data)
+		n += copied
+		p = p[copied:]
+	}
+	return n, nil
+}
+
+func (rr *remoteReaderAt) Close() error {
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/services/content/service.go b/vendor/github.com/containerd/containerd/services/content/service.go
new file mode 100644
index 0000000..8289e1e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/content/service.go
@@ -0,0 +1,453 @@
+package content
+
+import (
+	"io"
+	"sync"
+
+	api "github.com/containerd/containerd/api/services/content/v1"
+	eventsapi "github.com/containerd/containerd/api/services/events/v1"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/events"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/metadata"
+	"github.com/containerd/containerd/plugin"
+	"github.com/golang/protobuf/ptypes/empty"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+)
+
+type Service struct {
+	store     content.Store
+	publisher events.Publisher
+}
+
+var bufPool = sync.Pool{
+	New: func() interface{} {
+		return make([]byte, 1<<20)
+	},
+}
+
+var _ api.ContentServer = &Service{}
+
+func init() {
+	plugin.Register(&plugin.Registration{
+		Type: plugin.GRPCPlugin,
+		ID:   "content",
+		Requires: []plugin.Type{
+			plugin.MetadataPlugin,
+		},
+		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+			m, err := ic.Get(plugin.MetadataPlugin)
+			if err != nil {
+				return nil, err
+			}
+
+			s, err := NewService(m.(*metadata.DB).ContentStore(), ic.Events)
+			return s, err
+		},
+	})
+}
+
+func NewService(cs content.Store, publisher events.Publisher) (*Service, error) {
+	return &Service{
+		store:     cs,
+		publisher: publisher,
+	}, nil
+}
+
+func (s *Service) Register(server *grpc.Server) error {
+	api.RegisterContentServer(server, s)
+	return nil
+}
+
+func (s *Service) Info(ctx context.Context, req *api.InfoRequest) (*api.InfoResponse, error) {
+	if err := req.Digest.Validate(); err != nil {
+		return nil, grpc.Errorf(codes.InvalidArgument, "%q failed validation", req.Digest)
+	}
+
+	bi, err := s.store.Info(ctx, req.Digest)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	return &api.InfoResponse{
+		Info: infoToGRPC(bi),
+	}, nil
+}
+
+func (s *Service) Update(ctx context.Context, req *api.UpdateRequest) (*api.UpdateResponse, error) {
+	if err := req.Info.Digest.Validate(); err != nil {
+		return nil, grpc.Errorf(codes.InvalidArgument, "%q failed validation", req.Info.Digest)
+	}
+
+	info, err := s.store.Update(ctx, infoFromGRPC(req.Info), req.UpdateMask.GetPaths()...)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	return &api.UpdateResponse{
+		Info: infoToGRPC(info),
+	}, nil
+}
+
+func (s *Service) List(req *api.ListContentRequest, session api.Content_ListServer) error {
+	var (
+		buffer    []api.Info
+		sendBlock = func(block []api.Info) error {
+			// send last block
+			return session.Send(&api.ListContentResponse{
+				Info: block,
+			})
+		}
+	)
+
+	if err := s.store.Walk(session.Context(), func(info content.Info) error {
+		buffer = append(buffer, api.Info{
+			Digest:    info.Digest,
+			Size_:     info.Size,
+			CreatedAt: info.CreatedAt,
+			Labels:    info.Labels,
+		})
+
+		if len(buffer) >= 100 {
+			if err := sendBlock(buffer); err != nil {
+				return err
+			}
+
+			buffer = buffer[:0]
+		}
+
+		return nil
+	}, req.Filters...); err != nil {
+		return err
+	}
+
+	if len(buffer) > 0 {
+		// send last block
+		if err := sendBlock(buffer); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *Service) Delete(ctx context.Context, req *api.DeleteContentRequest) (*empty.Empty, error) {
+	if err := req.Digest.Validate(); err != nil {
+		return nil, grpc.Errorf(codes.InvalidArgument, err.Error())
+	}
+
+	if err := s.store.Delete(ctx, req.Digest); err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	if err := s.publisher.Publish(ctx, "/content/delete", &eventsapi.ContentDelete{
+		Digest: req.Digest,
+	}); err != nil {
+		return nil, err
+	}
+
+	return &empty.Empty{}, nil
+}
+
+func (s *Service) Read(req *api.ReadContentRequest, session api.Content_ReadServer) error {
+	if err := req.Digest.Validate(); err != nil {
+		return grpc.Errorf(codes.InvalidArgument, "%v: %v", req.Digest, err)
+	}
+
+	oi, err := s.store.Info(session.Context(), req.Digest)
+	if err != nil {
+		return errdefs.ToGRPC(err)
+	}
+
+	ra, err := s.store.ReaderAt(session.Context(), req.Digest)
+	if err != nil {
+		return errdefs.ToGRPC(err)
+	}
+	defer ra.Close()
+
+	var (
+		offset = req.Offset
+		size   = req.Size_
+
+		// TODO(stevvooe): Using the global buffer pool. This is probably a
+		// little inefficient for work over a fast network. We can tune this later.
+		p = bufPool.Get().([]byte)
+	)
+	defer bufPool.Put(p)
+
+	if offset < 0 {
+		offset = 0
+	}
+
+	if size <= 0 {
+		size = oi.Size - offset
+	}
+
+	if offset+size > oi.Size {
+		return grpc.Errorf(codes.OutOfRange, "read past object length %v bytes", oi.Size)
+	}
+
+	if _, err := io.CopyBuffer(
+		&readResponseWriter{session: session},
+		io.NewSectionReader(ra, offset, size), p); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// readResponseWriter is a writer that places the output into ReadContentResponse messages.
+//
+// This allows io.CopyBuffer to do the heavy lifting of chunking the responses
+// into the buffer size.
+type readResponseWriter struct {
+	offset  int64
+	session api.Content_ReadServer
+}
+
+func (rw *readResponseWriter) Write(p []byte) (n int, err error) {
+	if err := rw.session.Send(&api.ReadContentResponse{
+		Offset: rw.offset,
+		Data:   p,
+	}); err != nil {
+		return 0, err
+	}
+
+	rw.offset += int64(len(p))
+	return len(p), nil
+}
+
+func (s *Service) Status(ctx context.Context, req *api.StatusRequest) (*api.StatusResponse, error) {
+	status, err := s.store.Status(ctx, req.Ref)
+	if err != nil {
+		return nil, errdefs.ToGRPCf(err, "could not get status for ref %q", req.Ref)
+	}
+
+	var resp api.StatusResponse
+	resp.Status = &api.Status{
+		StartedAt: status.StartedAt,
+		UpdatedAt: status.UpdatedAt,
+		Ref:       status.Ref,
+		Offset:    status.Offset,
+		Total:     status.Total,
+		Expected:  status.Expected,
+	}
+
+	return &resp, nil
+}
+
+func (s *Service) ListStatuses(ctx context.Context, req *api.ListStatusesRequest) (*api.ListStatusesResponse, error) {
+	statuses, err := s.store.ListStatuses(ctx, req.Filters...)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	var resp api.ListStatusesResponse
+	for _, status := range statuses {
+		resp.Statuses = append(resp.Statuses, api.Status{
+			StartedAt: status.StartedAt,
+			UpdatedAt: status.UpdatedAt,
+			Ref:       status.Ref,
+			Offset:    status.Offset,
+			Total:     status.Total,
+			Expected:  status.Expected,
+		})
+	}
+
+	return &resp, nil
+}
+
+func (s *Service) Write(session api.Content_WriteServer) (err error) {
+	var (
+		ctx      = session.Context()
+		msg      api.WriteContentResponse
+		req      *api.WriteContentRequest
+		ref      string
+		total    int64
+		expected digest.Digest
+	)
+
+	defer func(msg *api.WriteContentResponse) {
+		// pump through the last message if no error was encountered
+		if err != nil {
+			if grpc.Code(err) != codes.AlreadyExists {
+				// TODO(stevvooe): Really need a log line here to track which
+				// errors are actually causing failure on the server side. May want
+				// to configure the service with an interceptor to make this work
+				// identically across all GRPC methods.
+				//
+				// This is pretty noisy, so we can remove it but leave it for now.
+				log.G(ctx).WithError(err).Error("(*Service).Write failed")
+			}
+
+			return
+		}
+
+		err = session.Send(msg)
+	}(&msg)
+
+	// handle the very first request!
+	req, err = session.Recv()
+	if err != nil {
+		return err
+	}
+
+	ref = req.Ref
+
+	if ref == "" {
+		return grpc.Errorf(codes.InvalidArgument, "first message must have a reference")
+	}
+
+	fields := logrus.Fields{
+		"ref": ref,
+	}
+	total = req.Total
+	expected = req.Expected
+	if total > 0 {
+		fields["total"] = total
+	}
+
+	if expected != "" {
+		fields["expected"] = expected
+	}
+
+	ctx = log.WithLogger(ctx, log.G(ctx).WithFields(fields))
+
+	log.G(ctx).Debug("(*Service).Write started")
+	// this action locks the writer for the session.
+	wr, err := s.store.Writer(ctx, ref, total, expected)
+	if err != nil {
+		return errdefs.ToGRPC(err)
+	}
+	defer wr.Close()
+
+	for {
+		msg.Action = req.Action
+		ws, err := wr.Status()
+		if err != nil {
+			return errdefs.ToGRPC(err)
+		}
+
+		msg.Offset = ws.Offset // always set the offset.
+
+		// NOTE(stevvooe): In general, there are two cases under which a remote
+		// writer is used.
+		//
+		// For pull, we almost always have this before fetching large content,
+		// through descriptors. We allow predeclaration of the expected size
+		// and digest.
+		//
+		// For push, it is more complex. If we want to cut through content into
+		// storage, we may have no expectation until we are done processing the
+		// content. The case here is the following:
+		//
+		// 	1. Start writing content.
+		// 	2. Compress inline.
+		// 	3. Validate digest and size (maybe).
+		//
+		// Supporting these two paths is quite awkward but it lets both API
+		// users use the same writer style for each with a minimum of overhead.
+		if req.Expected != "" {
+			if expected != "" && expected != req.Expected {
+				return grpc.Errorf(codes.InvalidArgument, "inconsistent digest provided: %v != %v", req.Expected, expected)
+			}
+			expected = req.Expected
+
+			if _, err := s.store.Info(session.Context(), req.Expected); err == nil {
+				if err := s.store.Abort(session.Context(), ref); err != nil {
+					log.G(ctx).WithError(err).Error("failed to abort write")
+				}
+
+				return grpc.Errorf(codes.AlreadyExists, "blob with expected digest %v exists", req.Expected)
+			}
+		}
+
+		if req.Total > 0 {
+			// Update the expected total. Typically, this could be seen at
+			// negotiation time or on a commit message.
+			if total > 0 && req.Total != total {
+				return grpc.Errorf(codes.InvalidArgument, "inconsistent total provided: %v != %v", req.Total, total)
+			}
+			total = req.Total
+		}
+
+		switch req.Action {
+		case api.WriteActionStat:
+			msg.Digest = wr.Digest()
+			msg.StartedAt = ws.StartedAt
+			msg.UpdatedAt = ws.UpdatedAt
+			msg.Total = total
+		case api.WriteActionWrite, api.WriteActionCommit:
+			if req.Offset > 0 {
+				// validate the offset if provided
+				if req.Offset != ws.Offset {
+					return grpc.Errorf(codes.OutOfRange, "write @%v must occur at current offset %v", req.Offset, ws.Offset)
+				}
+			}
+
+			if req.Offset == 0 && ws.Offset > 0 {
+				if err := wr.Truncate(req.Offset); err != nil {
+					return errors.Wrapf(err, "truncate failed")
+				}
+				msg.Offset = req.Offset
+			}
+
+			// issue the write if we actually have data.
+			if len(req.Data) > 0 {
+				// While this looks like we could use io.WriterAt here, because we
+				// maintain the offset as append only, we just issue the write.
+				n, err := wr.Write(req.Data)
+				if err != nil {
+					return err
+				}
+
+				if n != len(req.Data) {
+					// TODO(stevvooe): Perhaps, we can recover this by including it
+					// in the offset on the write return.
+					return grpc.Errorf(codes.DataLoss, "wrote %v of %v bytes", n, len(req.Data))
+				}
+
+				msg.Offset += int64(n)
+			}
+
+			if req.Action == api.WriteActionCommit {
+				var opts []content.Opt
+				if req.Labels != nil {
+					opts = append(opts, content.WithLabels(req.Labels))
+				}
+				if err := wr.Commit(ctx, total, expected, opts...); err != nil {
+					return err
+				}
+			}
+
+			msg.Digest = wr.Digest()
+		}
+
+		if err := session.Send(&msg); err != nil {
+			return err
+		}
+
+		req, err = session.Recv()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+
+			return err
+		}
+	}
+}
+
+func (s *Service) Abort(ctx context.Context, req *api.AbortRequest) (*empty.Empty, error) {
+	if err := s.store.Abort(ctx, req.Ref); err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	return &empty.Empty{}, nil
+}
diff --git a/vendor/github.com/containerd/containerd/services/content/store.go b/vendor/github.com/containerd/containerd/services/content/store.go
new file mode 100644
index 0000000..11fcc03
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/content/store.go
@@ -0,0 +1,207 @@
+package content
+
+import (
+	"context"
+	"io"
+
+	contentapi "github.com/containerd/containerd/api/services/content/v1"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	protobuftypes "github.com/gogo/protobuf/types"
+	digest "github.com/opencontainers/go-digest"
+)
+
+type remoteStore struct {
+	client contentapi.ContentClient
+}
+
+func NewStoreFromClient(client contentapi.ContentClient) content.Store {
+	return &remoteStore{
+		client: client,
+	}
+}
+
+func (rs *remoteStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
+	resp, err := rs.client.Info(ctx, &contentapi.InfoRequest{
+		Digest: dgst,
+	})
+	if err != nil {
+		return content.Info{}, errdefs.FromGRPC(err)
+	}
+
+	return infoFromGRPC(resp.Info), nil
+}
+
+func (rs *remoteStore) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
+	session, err := rs.client.List(ctx, &contentapi.ListContentRequest{
+		Filters: filters,
+	})
+	if err != nil {
+		return errdefs.FromGRPC(err)
+	}
+
+	for {
+		msg, err := session.Recv()
+		if err != nil {
+			if err != io.EOF {
+				return errdefs.FromGRPC(err)
+			}
+
+			break
+		}
+
+		for _, info := range msg.Info {
+			if err := fn(infoFromGRPC(info)); err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+func (rs *remoteStore) Delete(ctx context.Context, dgst digest.Digest) error {
+	if _, err := rs.client.Delete(ctx, &contentapi.DeleteContentRequest{
+		Digest: dgst,
+	}); err != nil {
+		return errdefs.FromGRPC(err)
+	}
+
+	return nil
+}
+
+func (rs *remoteStore) ReaderAt(ctx context.Context, dgst digest.Digest) (content.ReaderAt, error) {
+	i, err := rs.Info(ctx, dgst)
+	if err != nil {
+		return nil, err
+	}
+
+	return &remoteReaderAt{
+		ctx:    ctx,
+		digest: dgst,
+		size:   i.Size,
+		client: rs.client,
+	}, nil
+}
+
+func (rs *remoteStore) Status(ctx context.Context, ref string) (content.Status, error) {
+	resp, err := rs.client.Status(ctx, &contentapi.StatusRequest{
+		Ref: ref,
+	})
+	if err != nil {
+		return content.Status{}, errdefs.FromGRPC(err)
+	}
+
+	status := resp.Status
+	return content.Status{
+		Ref:       status.Ref,
+		StartedAt: status.StartedAt,
+		UpdatedAt: status.UpdatedAt,
+		Offset:    status.Offset,
+		Total:     status.Total,
+		Expected:  status.Expected,
+	}, nil
+}
+
+func (rs *remoteStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
+	resp, err := rs.client.Update(ctx, &contentapi.UpdateRequest{
+		Info: infoToGRPC(info),
+		UpdateMask: &protobuftypes.FieldMask{
+			Paths: fieldpaths,
+		},
+	})
+	if err != nil {
+		return content.Info{}, errdefs.FromGRPC(err)
+	}
+	return infoFromGRPC(resp.Info), nil
+}
+
+func (rs *remoteStore) ListStatuses(ctx context.Context, filters ...string) ([]content.Status, error) {
+	resp, err := rs.client.ListStatuses(ctx, &contentapi.ListStatusesRequest{
+		Filters: filters,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+
+	var statuses []content.Status
+	for _, status := range resp.Statuses {
+		statuses = append(statuses, content.Status{
+			Ref:       status.Ref,
+			StartedAt: status.StartedAt,
+			UpdatedAt: status.UpdatedAt,
+			Offset:    status.Offset,
+			Total:     status.Total,
+			Expected:  status.Expected,
+		})
+	}
+
+	return statuses, nil
+}
+
+func (rs *remoteStore) Writer(ctx context.Context, ref string, size int64, expected digest.Digest) (content.Writer, error) {
+	wrclient, offset, err := rs.negotiate(ctx, ref, size, expected)
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+
+	return &remoteWriter{
+		ref:    ref,
+		client: wrclient,
+		offset: offset,
+	}, nil
+}
+
+// Abort implements asynchronous abort. It starts a new write session on the ref.
+func (rs *remoteStore) Abort(ctx context.Context, ref string) error {
+	if _, err := rs.client.Abort(ctx, &contentapi.AbortRequest{
+		Ref: ref,
+	}); err != nil {
+		return errdefs.FromGRPC(err)
+	}
+
+	return nil
+}
+
+func (rs *remoteStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
+	wrclient, err := rs.client.Write(ctx)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if err := wrclient.Send(&contentapi.WriteContentRequest{
+		Action:   contentapi.WriteActionStat,
+		Ref:      ref,
+		Total:    size,
+		Expected: expected,
+	}); err != nil {
+		return nil, 0, err
+	}
+
+	resp, err := wrclient.Recv()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	return wrclient, resp.Offset, nil
+}
+
+func infoToGRPC(info content.Info) contentapi.Info {
+	return contentapi.Info{
+		Digest:    info.Digest,
+		Size_:     info.Size,
+		CreatedAt: info.CreatedAt,
+		UpdatedAt: info.UpdatedAt,
+		Labels:    info.Labels,
+	}
+}
+
+func infoFromGRPC(info contentapi.Info) content.Info {
+	return content.Info{
+		Digest:    info.Digest,
+		Size:      info.Size_,
+		CreatedAt: info.CreatedAt,
+		UpdatedAt: info.UpdatedAt,
+		Labels:    info.Labels,
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/services/content/writer.go b/vendor/github.com/containerd/containerd/services/content/writer.go
new file mode 100644
index 0000000..cb45957
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/content/writer.go
@@ -0,0 +1,123 @@
+package content
+
+import (
+	"context"
+	"io"
+
+	contentapi "github.com/containerd/containerd/api/services/content/v1"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/errdefs"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/pkg/errors"
+)
+
+type remoteWriter struct {
+	ref    string
+	client contentapi.Content_WriteClient
+	offset int64
+	digest digest.Digest
+}
+
+// send performs a synchronous req-resp cycle on the client.
+func (rw *remoteWriter) send(req *contentapi.WriteContentRequest) (*contentapi.WriteContentResponse, error) {
+	if err := rw.client.Send(req); err != nil {
+		return nil, err
+	}
+
+	resp, err := rw.client.Recv()
+
+	if err == nil {
+		// try to keep these in sync
+		if resp.Digest != "" {
+			rw.digest = resp.Digest
+		}
+	}
+
+	return resp, err
+}
+
+func (rw *remoteWriter) Status() (content.Status, error) {
+	resp, err := rw.send(&contentapi.WriteContentRequest{
+		Action: contentapi.WriteActionStat,
+	})
+	if err != nil {
+		return content.Status{}, err
+	}
+
+	return content.Status{
+		Ref:       rw.ref,
+		Offset:    resp.Offset,
+		Total:     resp.Total,
+		StartedAt: resp.StartedAt,
+		UpdatedAt: resp.UpdatedAt,
+	}, nil
+}
+
+func (rw *remoteWriter) Digest() digest.Digest {
+	return rw.digest
+}
+
+func (rw *remoteWriter) Write(p []byte) (n int, err error) {
+	offset := rw.offset
+
+	resp, err := rw.send(&contentapi.WriteContentRequest{
+		Action: contentapi.WriteActionWrite,
+		Offset: offset,
+		Data:   p,
+	})
+	if err != nil {
+		return 0, err
+	}
+
+	n = int(resp.Offset - offset)
+	if n < len(p) {
+		err = io.ErrShortWrite
+	}
+
+	rw.offset += int64(n)
+	if resp.Digest != "" {
+		rw.digest = resp.Digest
+	}
+	return
+}
+
+func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
+	var base content.Info
+	for _, opt := range opts {
+		if err := opt(&base); err != nil {
+			return err
+		}
+	}
+	resp, err := rw.send(&contentapi.WriteContentRequest{
+		Action:   contentapi.WriteActionCommit,
+		Total:    size,
+		Offset:   rw.offset,
+		Expected: expected,
+		Labels:   base.Labels,
+	})
+	if err != nil {
+		return errdefs.FromGRPC(err)
+	}
+
+	if size != 0 && resp.Offset != size {
+		return errors.Errorf("unexpected size: %v != %v", resp.Offset, size)
+	}
+
+	if expected != "" && resp.Digest != expected {
+		return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
+	}
+
+	rw.digest = resp.Digest
+	rw.offset = resp.Offset
+	return nil
+}
+
+func (rw *remoteWriter) Truncate(size int64) error {
+	// This truncation won't actually be validated until a write is issued.
+	rw.offset = size
+	return nil
+}
+
+func (rw *remoteWriter) Close() error {
+	return rw.client.CloseSend()
+}
diff --git a/vendor/github.com/containerd/containerd/services/diff/client.go b/vendor/github.com/containerd/containerd/services/diff/client.go
new file mode 100644
index 0000000..5267f97
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/diff/client.go
@@ -0,0 +1,83 @@
+package diff
+
+import (
+	diffapi "github.com/containerd/containerd/api/services/diff/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/diff"
+	"github.com/containerd/containerd/mount"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"golang.org/x/net/context"
+)
+
+// NewDiffServiceFromClient returns a new diff.Differ which communicates
+// over a GRPC connection.
+func NewDiffServiceFromClient(client diffapi.DiffClient) diff.Differ {
+	return &remote{
+		client: client,
+	}
+}
+
+type remote struct {
+	client diffapi.DiffClient
+}
+
+func (r *remote) Apply(ctx context.Context, diff ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) {
+	req := &diffapi.ApplyRequest{
+		Diff:   fromDescriptor(diff),
+		Mounts: fromMounts(mounts),
+	}
+	resp, err := r.client.Apply(ctx, req)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+	return toDescriptor(resp.Applied), nil
+}
+
+func (r *remote) DiffMounts(ctx context.Context, a, b []mount.Mount, opts ...diff.Opt) (ocispec.Descriptor, error) {
+	var config diff.Config
+	for _, opt := range opts {
+		if err := opt(&config); err != nil {
+			return ocispec.Descriptor{}, err
+		}
+	}
+	req := &diffapi.DiffRequest{
+		Left:      fromMounts(a),
+		Right:     fromMounts(b),
+		MediaType: config.MediaType,
+		Ref:       config.Reference,
+		Labels:    config.Labels,
+	}
+	resp, err := r.client.Diff(ctx, req)
+	if err != nil {
+		return ocispec.Descriptor{}, err
+	}
+	return toDescriptor(resp.Diff), nil
+}
+
+func toDescriptor(d *types.Descriptor) ocispec.Descriptor {
+	return ocispec.Descriptor{
+		MediaType: d.MediaType,
+		Digest:    d.Digest,
+		Size:      d.Size_,
+	}
+}
+
+func fromDescriptor(d ocispec.Descriptor) *types.Descriptor {
+	return &types.Descriptor{
+		MediaType: d.MediaType,
+		Digest:    d.Digest,
+		Size_:     d.Size,
+	}
+}
+
+func fromMounts(mounts []mount.Mount) []*types.Mount {
+	apiMounts := make([]*types.Mount, len(mounts))
+	for i, m := range mounts {
+		apiMounts[i] = &types.Mount{
+			Type:    m.Type,
+			Source:  m.Source,
+			Options: m.Options,
+		}
+	}
+	return apiMounts
+}
diff --git a/vendor/github.com/containerd/containerd/services/diff/service.go b/vendor/github.com/containerd/containerd/services/diff/service.go
new file mode 100644
index 0000000..81e44dc
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/diff/service.go
@@ -0,0 +1,142 @@
+package diff
+
+import (
+	diffapi "github.com/containerd/containerd/api/services/diff/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/diff"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/plugin"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+type config struct {
+	// Order is the order of preference in which to try diff algorithms, the
+	// first differ which is supported is used.
+	// Note when multiple differs may be supported, this order will be
+	// respected for which is chosen. Each differ should return the same
+	// correct output, allowing any ordering to be used to prefer
+	// more optimal implementations.
+	Order []string `toml:"default"`
+}
+
+func init() {
+	plugin.Register(&plugin.Registration{
+		Type: plugin.GRPCPlugin,
+		ID:   "diff",
+		Requires: []plugin.Type{
+			plugin.DiffPlugin,
+		},
+		Config: &config{
+			Order: []string{"walking"},
+		},
+		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+			differs, err := ic.GetByType(plugin.DiffPlugin)
+			if err != nil {
+				return nil, err
+			}
+
+			orderedNames := ic.Config.(*config).Order
+			ordered := make([]diff.Differ, len(orderedNames))
+			for i, n := range orderedNames {
+				differp, ok := differs[n]
+				if !ok {
+					return nil, errors.Errorf("needed differ not loaded: %s", n)
+				}
+				differ, err := differp.Instance()
+				if err != nil {
+					return nil, errors.Wrapf(err, "could not load required differ due plugin init error: %s", n)
+				}
+
+				ordered[i] = differ.(diff.Differ)
+			}
+
+			return &service{
+				differs: ordered,
+			}, nil
+		},
+	})
+}
+
+type service struct {
+	differs []diff.Differ
+}
+
+func (s *service) Register(gs *grpc.Server) error {
+	diffapi.RegisterDiffServer(gs, s)
+	return nil
+}
+
+func (s *service) Apply(ctx context.Context, er *diffapi.ApplyRequest) (*diffapi.ApplyResponse, error) {
+	var (
+		ocidesc ocispec.Descriptor
+		err     error
+		desc    = toDescriptor(er.Diff)
+		mounts  = toMounts(er.Mounts)
+	)
+
+	for _, differ := range s.differs {
+		ocidesc, err = differ.Apply(ctx, desc, mounts)
+		if !errdefs.IsNotImplemented(err) {
+			break
+		}
+	}
+
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	return &diffapi.ApplyResponse{
+		Applied: fromDescriptor(ocidesc),
+	}, nil
+
+}
+
+func (s *service) Diff(ctx context.Context, dr *diffapi.DiffRequest) (*diffapi.DiffResponse, error) {
+	var (
+		ocidesc ocispec.Descriptor
+		err     error
+		aMounts = toMounts(dr.Left)
+		bMounts = toMounts(dr.Right)
+	)
+
+	var opts []diff.Opt
+	if dr.MediaType != "" {
+		opts = append(opts, diff.WithMediaType(dr.MediaType))
+	}
+	if dr.Ref != "" {
+		opts = append(opts, diff.WithReference(dr.Ref))
+	}
+	if dr.Labels != nil {
+		opts = append(opts, diff.WithLabels(dr.Labels))
+	}
+
+	for _, differ := range s.differs {
+		ocidesc, err = differ.DiffMounts(ctx, aMounts, bMounts, opts...)
+		if !errdefs.IsNotImplemented(err) {
+			break
+		}
+	}
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	return &diffapi.DiffResponse{
+		Diff: fromDescriptor(ocidesc),
+	}, nil
+}
+
+func toMounts(apim []*types.Mount) []mount.Mount {
+	mounts := make([]mount.Mount, len(apim))
+	for i, m := range apim {
+		mounts[i] = mount.Mount{
+			Type:    m.Type,
+			Source:  m.Source,
+			Options: m.Options,
+		}
+	}
+	return mounts
+}
diff --git a/vendor/github.com/containerd/containerd/services/images/client.go b/vendor/github.com/containerd/containerd/services/images/client.go
new file mode 100644
index 0000000..eebe776
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/images/client.go
@@ -0,0 +1,80 @@
+package images
+
+import (
+	"context"
+
+	imagesapi "github.com/containerd/containerd/api/services/images/v1"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	ptypes "github.com/gogo/protobuf/types"
+)
+
+type remoteStore struct {
+	client imagesapi.ImagesClient
+}
+
+func NewStoreFromClient(client imagesapi.ImagesClient) images.Store {
+	return &remoteStore{
+		client: client,
+	}
+}
+
+func (s *remoteStore) Get(ctx context.Context, name string) (images.Image, error) {
+	resp, err := s.client.Get(ctx, &imagesapi.GetImageRequest{
+		Name: name,
+	})
+	if err != nil {
+		return images.Image{}, errdefs.FromGRPC(err)
+	}
+
+	return imageFromProto(resp.Image), nil
+}
+
+func (s *remoteStore) List(ctx context.Context, filters ...string) ([]images.Image, error) {
+	resp, err := s.client.List(ctx, &imagesapi.ListImagesRequest{
+		Filters: filters,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+
+	return imagesFromProto(resp.Images), nil
+}
+
+func (s *remoteStore) Create(ctx context.Context, image images.Image) (images.Image, error) {
+	created, err := s.client.Create(ctx, &imagesapi.CreateImageRequest{
+		Image: imageToProto(&image),
+	})
+	if err != nil {
+		return images.Image{}, errdefs.FromGRPC(err)
+	}
+
+	return imageFromProto(&created.Image), nil
+}
+
+func (s *remoteStore) Update(ctx context.Context, image images.Image, fieldpaths ...string) (images.Image, error) {
+	var updateMask *ptypes.FieldMask
+	if len(fieldpaths) > 0 {
+		updateMask = &ptypes.FieldMask{
+			Paths: fieldpaths,
+		}
+	}
+
+	updated, err := s.client.Update(ctx, &imagesapi.UpdateImageRequest{
+		Image:      imageToProto(&image),
+		UpdateMask: updateMask,
+	})
+	if err != nil {
+		return images.Image{}, errdefs.FromGRPC(err)
+	}
+
+	return imageFromProto(&updated.Image), nil
+}
+
+func (s *remoteStore) Delete(ctx context.Context, name string) error {
+	_, err := s.client.Delete(ctx, &imagesapi.DeleteImageRequest{
+		Name: name,
+	})
+
+	return errdefs.FromGRPC(err)
+}
diff --git a/vendor/github.com/containerd/containerd/services/images/helpers.go b/vendor/github.com/containerd/containerd/services/images/helpers.go
new file mode 100644
index 0000000..374aefd
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/images/helpers.go
@@ -0,0 +1,64 @@
+package images
+
+import (
+	imagesapi "github.com/containerd/containerd/api/services/images/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/images"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+func imagesToProto(images []images.Image) []imagesapi.Image {
+	var imagespb []imagesapi.Image
+
+	for _, image := range images {
+		imagespb = append(imagespb, imageToProto(&image))
+	}
+
+	return imagespb
+}
+
+func imagesFromProto(imagespb []imagesapi.Image) []images.Image {
+	var images []images.Image
+
+	for _, image := range imagespb {
+		images = append(images, imageFromProto(&image))
+	}
+
+	return images
+}
+
+func imageToProto(image *images.Image) imagesapi.Image {
+	return imagesapi.Image{
+		Name:      image.Name,
+		Labels:    image.Labels,
+		Target:    descToProto(&image.Target),
+		CreatedAt: image.CreatedAt,
+		UpdatedAt: image.UpdatedAt,
+	}
+}
+
+func imageFromProto(imagepb *imagesapi.Image) images.Image {
+	return images.Image{
+		Name:      imagepb.Name,
+		Labels:    imagepb.Labels,
+		Target:    descFromProto(&imagepb.Target),
+		CreatedAt: imagepb.CreatedAt,
+		UpdatedAt: imagepb.UpdatedAt,
+	}
+}
+
+func descFromProto(desc *types.Descriptor) ocispec.Descriptor {
+	return ocispec.Descriptor{
+		MediaType: desc.MediaType,
+		Size:      desc.Size_,
+		Digest:    desc.Digest,
+	}
+}
+
+func descToProto(desc *ocispec.Descriptor) types.Descriptor {
+	return types.Descriptor{
+		MediaType: desc.MediaType,
+		Size_:     desc.Size,
+		Digest:    desc.Digest,
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/services/images/service.go b/vendor/github.com/containerd/containerd/services/images/service.go
new file mode 100644
index 0000000..fa8a00a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/images/service.go
@@ -0,0 +1,182 @@
+package images
+
+import (
+	"github.com/boltdb/bolt"
+	eventsapi "github.com/containerd/containerd/api/services/events/v1"
+	imagesapi "github.com/containerd/containerd/api/services/images/v1"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/events"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/metadata"
+	"github.com/containerd/containerd/plugin"
+	"github.com/golang/protobuf/ptypes/empty"
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+func init() {
+	plugin.Register(&plugin.Registration{
+		Type: plugin.GRPCPlugin,
+		ID:   "images",
+		Requires: []plugin.Type{
+			plugin.MetadataPlugin,
+		},
+		InitFn: func(ic *plugin.InitContext) (interface{}, error) {
+			m, err := ic.Get(plugin.MetadataPlugin)
+			if err != nil {
+				return nil, err
+			}
+			return NewService(m.(*metadata.DB), ic.Events), nil
+		},
+	})
+}
+
+type Service struct {
+	db        *metadata.DB
+	publisher events.Publisher
+}
+
+func NewService(db *metadata.DB, publisher events.Publisher) imagesapi.ImagesServer {
+	return &Service{
+		db:        db,
+		publisher: publisher,
+	}
+}
+
+func (s *Service) Register(server *grpc.Server) error {
+	imagesapi.RegisterImagesServer(server, s)
+	return nil
+}
+
+func (s *Service) Get(ctx context.Context, req *imagesapi.GetImageRequest) (*imagesapi.GetImageResponse, error) {
+	var resp imagesapi.GetImageResponse
+
+	return &resp, errdefs.ToGRPC(s.withStoreView(ctx, func(ctx context.Context, store images.Store) error {
+		image, err := store.Get(ctx, req.Name)
+		if err != nil {
+			return err
+		}
+		imagepb := imageToProto(&image)
+		resp.Image = &imagepb
+		return nil
+	}))
+}
+
+func (s *Service) List(ctx context.Context, req *imagesapi.ListImagesRequest) (*imagesapi.ListImagesResponse, error) {
+	var resp imagesapi.ListImagesResponse
+
+	return &resp, errdefs.ToGRPC(s.withStoreView(ctx, func(ctx context.Context, store images.Store) error {
+		images, err := store.List(ctx, req.Filters...)
+		if err != nil {
+			return err
+		}
+
+		resp.Images = imagesToProto(images)
+		return nil
+	}))
+}
+
+func (s *Service) Create(ctx context.Context, req *imagesapi.CreateImageRequest) (*imagesapi.CreateImageResponse, error) {
+	if req.Image.Name == "" {
+		return nil, status.Errorf(codes.InvalidArgument, "Image.Name required")
+	}
+
+	var (
+		image = imageFromProto(&req.Image)
+		resp  imagesapi.CreateImageResponse
+	)
+	if err := s.withStoreUpdate(ctx, func(ctx context.Context, store images.Store) error {
+		created, err := store.Create(ctx, image)
+		if err != nil {
+			return err
+		}
+
+		resp.Image = imageToProto(&created)
+		return nil
+	}); err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	if err := s.publisher.Publish(ctx, "/images/create", &eventsapi.ImageCreate{
+		Name:   resp.Image.Name,
+		Labels: resp.Image.Labels,
+	}); err != nil {
+		return nil, err
+	}
+
+	return &resp, nil
+
+}
+
+func (s *Service) Update(ctx context.Context, req *imagesapi.UpdateImageRequest) (*imagesapi.UpdateImageResponse, error) {
+	if req.Image.Name == "" {
+		return nil, status.Errorf(codes.InvalidArgument, "Image.Name required")
+	}
+
+	var (
+		image = imageFromProto(&req.Image)
+		resp  imagesapi.UpdateImageResponse
+	)
+	if err := s.withStoreUpdate(ctx, func(ctx context.Context, store images.Store) error {
+		var fieldpaths []string
+		if req.UpdateMask != nil && len(req.UpdateMask.Paths) > 0 {
+			for _, path := range req.UpdateMask.Paths {
+				fieldpaths = append(fieldpaths, path)
+			}
+		}
+
+		updated, err := store.Update(ctx, image, fieldpaths...)
+		if err != nil {
+			return err
+		}
+
+		resp.Image = imageToProto(&updated)
+		return nil
+	}); err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	if err := s.publisher.Publish(ctx, "/images/update", &eventsapi.ImageUpdate{
+		Name:   resp.Image.Name,
+		Labels: resp.Image.Labels,
+	}); err != nil {
+		return nil, err
+	}
+
+	return &resp, nil
+}
+
+func (s *Service) Delete(ctx context.Context, req *imagesapi.DeleteImageRequest) (*empty.Empty, error) {
+	if err := s.withStoreUpdate(ctx, func(ctx context.Context, store images.Store) error {
+		return errdefs.ToGRPC(store.Delete(ctx, req.Name))
+	}); err != nil {
+		return nil, err
+	}
+
+	if err := s.publisher.Publish(ctx, "/images/delete", &eventsapi.ImageDelete{
+		Name: req.Name,
+	}); err != nil {
+		return nil, err
+	}
+
+	if err := s.db.GarbageCollect(ctx); err != nil {
+		return nil, errdefs.ToGRPC(errors.Wrap(err, "garbage collection failed"))
+	}
+
+	return &empty.Empty{}, nil
+}
+
+func (s *Service) withStore(ctx context.Context, fn func(ctx context.Context, store images.Store) error) func(tx *bolt.Tx) error {
+	return func(tx *bolt.Tx) error { return fn(ctx, metadata.NewImageStore(tx)) }
+}
+
+func (s *Service) withStoreView(ctx context.Context, fn func(ctx context.Context, store images.Store) error) error {
+	return s.db.View(s.withStore(ctx, fn))
+}
+
+func (s *Service) withStoreUpdate(ctx context.Context, fn func(ctx context.Context, store images.Store) error) error {
+	return s.db.Update(s.withStore(ctx, fn))
+}
diff --git a/vendor/github.com/containerd/containerd/services/snapshot/client.go b/vendor/github.com/containerd/containerd/services/snapshot/client.go
new file mode 100644
index 0000000..a9b9ffe
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/snapshot/client.go
@@ -0,0 +1,204 @@
+package snapshot
+
+import (
+	"context"
+	"io"
+
+	snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/snapshot"
+	protobuftypes "github.com/gogo/protobuf/types"
+)
+
+// NewSnapshotterFromClient returns a new Snapshotter which communicates
+// over a GRPC connection.
+func NewSnapshotterFromClient(client snapshotapi.SnapshotsClient, snapshotterName string) snapshot.Snapshotter {
+	return &remoteSnapshotter{
+		client:          client,
+		snapshotterName: snapshotterName,
+	}
+}
+
+type remoteSnapshotter struct {
+	client          snapshotapi.SnapshotsClient
+	snapshotterName string
+}
+
+func (r *remoteSnapshotter) Stat(ctx context.Context, key string) (snapshot.Info, error) {
+	resp, err := r.client.Stat(ctx,
+		&snapshotapi.StatSnapshotRequest{
+			Snapshotter: r.snapshotterName,
+			Key:         key,
+		})
+	if err != nil {
+		return snapshot.Info{}, errdefs.FromGRPC(err)
+	}
+	return toInfo(resp.Info), nil
+}
+
+func (r *remoteSnapshotter) Update(ctx context.Context, info snapshot.Info, fieldpaths ...string) (snapshot.Info, error) {
+	resp, err := r.client.Update(ctx,
+		&snapshotapi.UpdateSnapshotRequest{
+			Snapshotter: r.snapshotterName,
+			Info:        fromInfo(info),
+			UpdateMask: &protobuftypes.FieldMask{
+				Paths: fieldpaths,
+			},
+		})
+	if err != nil {
+		return snapshot.Info{}, errdefs.FromGRPC(err)
+	}
+	return toInfo(resp.Info), nil
+}
+
+func (r *remoteSnapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, error) {
+	resp, err := r.client.Usage(ctx, &snapshotapi.UsageRequest{
+		Snapshotter: r.snapshotterName,
+		Key:         key,
+	})
+	if err != nil {
+		return snapshot.Usage{}, errdefs.FromGRPC(err)
+	}
+	return toUsage(resp), nil
+}
+
+func (r *remoteSnapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
+	resp, err := r.client.Mounts(ctx, &snapshotapi.MountsRequest{
+		Snapshotter: r.snapshotterName,
+		Key:         key,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	return toMounts(resp.Mounts), nil
+}
+
+func (r *remoteSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) {
+	var local snapshot.Info
+	for _, opt := range opts {
+		if err := opt(&local); err != nil {
+			return nil, err
+		}
+	}
+	resp, err := r.client.Prepare(ctx, &snapshotapi.PrepareSnapshotRequest{
+		Snapshotter: r.snapshotterName,
+		Key:         key,
+		Parent:      parent,
+		Labels:      local.Labels,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	return toMounts(resp.Mounts), nil
+}
+
+func (r *remoteSnapshotter) View(ctx context.Context, key, parent string, opts ...snapshot.Opt) ([]mount.Mount, error) {
+	var local snapshot.Info
+	for _, opt := range opts {
+		if err := opt(&local); err != nil {
+			return nil, err
+		}
+	}
+	resp, err := r.client.View(ctx, &snapshotapi.ViewSnapshotRequest{
+		Snapshotter: r.snapshotterName,
+		Key:         key,
+		Parent:      parent,
+		Labels:      local.Labels,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	return toMounts(resp.Mounts), nil
+}
+
+func (r *remoteSnapshotter) Commit(ctx context.Context, name, key string, opts ...snapshot.Opt) error {
+	var local snapshot.Info
+	for _, opt := range opts {
+		if err := opt(&local); err != nil {
+			return err
+		}
+	}
+	_, err := r.client.Commit(ctx, &snapshotapi.CommitSnapshotRequest{
+		Snapshotter: r.snapshotterName,
+		Name:        name,
+		Key:         key,
+		Labels:      local.Labels,
+	})
+	return errdefs.FromGRPC(err)
+}
+
+func (r *remoteSnapshotter) Remove(ctx context.Context, key string) error {
+	_, err := r.client.Remove(ctx, &snapshotapi.RemoveSnapshotRequest{
+		Snapshotter: r.snapshotterName,
+		Key:         key,
+	})
+	return errdefs.FromGRPC(err)
+}
+
+func (r *remoteSnapshotter) Walk(ctx context.Context, fn func(context.Context, snapshot.Info) error) error {
+	sc, err := r.client.List(ctx, &snapshotapi.ListSnapshotsRequest{
+		Snapshotter: r.snapshotterName,
+	})
+	if err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	for {
+		resp, err := sc.Recv()
+		if err != nil {
+			if err == io.EOF {
+				return nil
+			}
+			return errdefs.FromGRPC(err)
+		}
+		if resp == nil {
+			return nil
+		}
+		for _, info := range resp.Info {
+			if err := fn(ctx, toInfo(info)); err != nil {
+				return err
+			}
+		}
+	}
+}
+
+func toKind(kind snapshotapi.Kind) snapshot.Kind {
+	if kind == snapshotapi.KindActive {
+		return snapshot.KindActive
+	}
+	if kind == snapshotapi.KindView {
+		return snapshot.KindView
+	}
+	return snapshot.KindCommitted
+}
+
+func toInfo(info snapshotapi.Info) snapshot.Info {
+	return snapshot.Info{
+		Name:    info.Name,
+		Parent:  info.Parent,
+		Kind:    toKind(info.Kind),
+		Created: info.CreatedAt,
+		Updated: info.UpdatedAt,
+		Labels:  info.Labels,
+	}
+}
+
+func toUsage(resp *snapshotapi.UsageResponse) snapshot.Usage {
+	return snapshot.Usage{
+		Inodes: resp.Inodes,
+		Size:   resp.Size_,
+	}
+}
+
+func toMounts(mm []*types.Mount) []mount.Mount {
+	mounts := make([]mount.Mount, len(mm))
+	for i, m := range mm {
+		mounts[i] = mount.Mount{
+			Type:    m.Type,
+			Source:  m.Source,
+			Options: m.Options,
+		}
+	}
+	return mounts
+}
diff --git a/vendor/github.com/containerd/containerd/services/snapshot/service.go b/vendor/github.com/containerd/containerd/services/snapshot/service.go
new file mode 100644
index 0000000..716b4c4
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/services/snapshot/service.go
@@ -0,0 +1,295 @@
+package snapshot
+
+import (
+	gocontext "context"
+
+	eventsapi "github.com/containerd/containerd/api/services/events/v1"
+	snapshotapi "github.com/containerd/containerd/api/services/snapshot/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/events"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/metadata"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/plugin"
+	"github.com/containerd/containerd/snapshot"
+	protoempty "github.com/golang/protobuf/ptypes/empty"
+	"golang.org/x/net/context"
+	"google.golang.org/grpc"
+)
+
+func init() {
+	plugin.Register(&plugin.Registration{
+		Type: plugin.GRPCPlugin,
+		ID:   "snapshots",
+		Requires: []plugin.Type{
+			plugin.MetadataPlugin,
+		},
+		InitFn: newService,
+	})
+}
+
+var empty = &protoempty.Empty{}
+
+type service struct {
+	db        *metadata.DB
+	publisher events.Publisher
+}
+
+func newService(ic *plugin.InitContext) (interface{}, error) {
+	md, err := ic.Get(plugin.MetadataPlugin)
+	if err != nil {
+		return nil, err
+	}
+
+	return &service{
+		db:        md.(*metadata.DB),
+		publisher: ic.Events,
+	}, nil
+}
+
+func (s *service) getSnapshotter(name string) (snapshot.Snapshotter, error) {
+	if name == "" {
+		return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter argument missing")
+	}
+
+	sn := s.db.Snapshotter(name)
+	if sn == nil {
+		return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter not loaded: %s", name)
+	}
+	return sn, nil
+}
+
+func (s *service) Register(gs *grpc.Server) error {
+	snapshotapi.RegisterSnapshotsServer(gs, s)
+	return nil
+}
+
+func (s *service) Prepare(ctx context.Context, pr *snapshotapi.PrepareSnapshotRequest) (*snapshotapi.PrepareSnapshotResponse, error) {
+	log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("Preparing snapshot")
+	sn, err := s.getSnapshotter(pr.Snapshotter)
+	if err != nil {
+		return nil, err
+	}
+
+	var opts []snapshot.Opt
+	if pr.Labels != nil {
+		opts = append(opts, snapshot.WithLabels(pr.Labels))
+	}
+	mounts, err := sn.Prepare(ctx, pr.Key, pr.Parent, opts...)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	if err := s.publisher.Publish(ctx, "/snapshot/prepare", &eventsapi.SnapshotPrepare{
+		Key:    pr.Key,
+		Parent: pr.Parent,
+	}); err != nil {
+		return nil, err
+	}
+	return &snapshotapi.PrepareSnapshotResponse{
+		Mounts: fromMounts(mounts),
+	}, nil
+}
+
+func (s *service) View(ctx context.Context, pr *snapshotapi.ViewSnapshotRequest) (*snapshotapi.ViewSnapshotResponse, error) {
+	log.G(ctx).WithField("parent", pr.Parent).WithField("key", pr.Key).Debugf("Preparing view snapshot")
+	sn, err := s.getSnapshotter(pr.Snapshotter)
+	if err != nil {
+		return nil, err
+	}
+	var opts []snapshot.Opt
+	if pr.Labels != nil {
+		opts = append(opts, snapshot.WithLabels(pr.Labels))
+	}
+	mounts, err := sn.View(ctx, pr.Key, pr.Parent, opts...)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+	return &snapshotapi.ViewSnapshotResponse{
+		Mounts: fromMounts(mounts),
+	}, nil
+}
+
+func (s *service) Mounts(ctx context.Context, mr *snapshotapi.MountsRequest) (*snapshotapi.MountsResponse, error) {
+	log.G(ctx).WithField("key", mr.Key).Debugf("Getting snapshot mounts")
+	sn, err := s.getSnapshotter(mr.Snapshotter)
+	if err != nil {
+		return nil, err
+	}
+
+	mounts, err := sn.Mounts(ctx, mr.Key)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+	return &snapshotapi.MountsResponse{
+		Mounts: fromMounts(mounts),
+	}, nil
+}
+
+func (s *service) Commit(ctx context.Context, cr *snapshotapi.CommitSnapshotRequest) (*protoempty.Empty, error) {
+	log.G(ctx).WithField("key", cr.Key).WithField("name", cr.Name).Debugf("Committing snapshot")
+	sn, err := s.getSnapshotter(cr.Snapshotter)
+	if err != nil {
+		return nil, err
+	}
+
+	var opts []snapshot.Opt
+	if cr.Labels != nil {
+		opts = append(opts, snapshot.WithLabels(cr.Labels))
+	}
+	if err := sn.Commit(ctx, cr.Name, cr.Key, opts...); err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	if err := s.publisher.Publish(ctx, "/snapshot/commit", &eventsapi.SnapshotCommit{
+		Key:  cr.Key,
+		Name: cr.Name,
+	}); err != nil {
+		return nil, err
+	}
+	return empty, nil
+}
+
+func (s *service) Remove(ctx context.Context, rr *snapshotapi.RemoveSnapshotRequest) (*protoempty.Empty, error) {
+	log.G(ctx).WithField("key", rr.Key).Debugf("Removing snapshot")
+	sn, err := s.getSnapshotter(rr.Snapshotter)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := sn.Remove(ctx, rr.Key); err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	if err := s.publisher.Publish(ctx, "/snapshot/remove", &eventsapi.SnapshotRemove{
+		Key: rr.Key,
+	}); err != nil {
+		return nil, err
+	}
+	return empty, nil
+}
+
+func (s *service) Stat(ctx context.Context, sr *snapshotapi.StatSnapshotRequest) (*snapshotapi.StatSnapshotResponse, error) {
+	log.G(ctx).WithField("key", sr.Key).Debugf("Statting snapshot")
+	sn, err := s.getSnapshotter(sr.Snapshotter)
+	if err != nil {
+		return nil, err
+	}
+
+	info, err := sn.Stat(ctx, sr.Key)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	return &snapshotapi.StatSnapshotResponse{Info: fromInfo(info)}, nil
+}
+
+func (s *service) Update(ctx context.Context, sr *snapshotapi.UpdateSnapshotRequest) (*snapshotapi.UpdateSnapshotResponse, error) {
+	log.G(ctx).WithField("key", sr.Info.Name).Debugf("Updating snapshot")
+	sn, err := s.getSnapshotter(sr.Snapshotter)
+	if err != nil {
+		return nil, err
+	}
+
+	info, err := sn.Update(ctx, toInfo(sr.Info), sr.UpdateMask.GetPaths()...)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	return &snapshotapi.UpdateSnapshotResponse{Info: fromInfo(info)}, nil
+}
+
+func (s *service) List(sr *snapshotapi.ListSnapshotsRequest, ss snapshotapi.Snapshots_ListServer) error {
+	sn, err := s.getSnapshotter(sr.Snapshotter)
+	if err != nil {
+		return err
+	}
+
+	var (
+		buffer    []snapshotapi.Info
+		sendBlock = func(block []snapshotapi.Info) error {
+			return ss.Send(&snapshotapi.ListSnapshotsResponse{
+				Info: block,
+			})
+		}
+	)
+	err = sn.Walk(ss.Context(), func(ctx gocontext.Context, info snapshot.Info) error {
+		buffer = append(buffer, fromInfo(info))
+
+		if len(buffer) >= 100 {
+			if err := sendBlock(buffer); err != nil {
+				return err
+			}
+
+			buffer = buffer[:0]
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	if len(buffer) > 0 {
+		// Send remaining infos
+		if err := sendBlock(buffer); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (s *service) Usage(ctx context.Context, ur *snapshotapi.UsageRequest) (*snapshotapi.UsageResponse, error) {
+	sn, err := s.getSnapshotter(ur.Snapshotter)
+	if err != nil {
+		return nil, err
+	}
+
+	usage, err := sn.Usage(ctx, ur.Key)
+	if err != nil {
+		return nil, errdefs.ToGRPC(err)
+	}
+
+	return fromUsage(usage), nil
+}
+
+func fromKind(kind snapshot.Kind) snapshotapi.Kind {
+	if kind == snapshot.KindActive {
+		return snapshotapi.KindActive
+	}
+	if kind == snapshot.KindView {
+		return snapshotapi.KindView
+	}
+	return snapshotapi.KindCommitted
+}
+
+func fromInfo(info snapshot.Info) snapshotapi.Info {
+	return snapshotapi.Info{
+		Name:      info.Name,
+		Parent:    info.Parent,
+		Kind:      fromKind(info.Kind),
+		CreatedAt: info.Created,
+		UpdatedAt: info.Updated,
+		Labels:    info.Labels,
+	}
+}
+
+func fromUsage(usage snapshot.Usage) *snapshotapi.UsageResponse {
+	return &snapshotapi.UsageResponse{
+		Inodes: usage.Inodes,
+		Size_:  usage.Size,
+	}
+}
+
+func fromMounts(mounts []mount.Mount) []*types.Mount {
+	out := make([]*types.Mount, len(mounts))
+	for i, m := range mounts {
+		out[i] = &types.Mount{
+			Type:    m.Type,
+			Source:  m.Source,
+			Options: m.Options,
+		}
+	}
+	return out
+}
diff --git a/vendor/github.com/containerd/containerd/snapshot/snapshotter.go b/vendor/github.com/containerd/containerd/snapshot/snapshotter.go
new file mode 100644
index 0000000..6beafd4
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/snapshot/snapshotter.go
@@ -0,0 +1,303 @@
+package snapshot
+
+import (
+	"context"
+	"encoding/json"
+	"strings"
+	"time"
+
+	"github.com/containerd/containerd/mount"
+)
+
+// Kind identifies the kind of snapshot.
+type Kind uint8
+
+// definitions of snapshot kinds
+const (
+	KindUnknown Kind = iota
+	KindView
+	KindActive
+	KindCommitted
+)
+
+func ParseKind(s string) Kind {
+	s = strings.ToLower(s)
+	switch s {
+	case "view":
+		return KindView
+	case "active":
+		return KindActive
+	case "committed":
+		return KindCommitted
+	}
+
+	return KindUnknown
+}
+
+func (k Kind) String() string {
+	switch k {
+	case KindView:
+		return "View"
+	case KindActive:
+		return "Active"
+	case KindCommitted:
+		return "Committed"
+	}
+
+	return "Unknown"
+}
+
+func (k Kind) MarshalJSON() ([]byte, error) {
+	return json.Marshal(k.String())
+}
+
+func (k *Kind) UnmarshalJSON(b []byte) error {
+	var s string
+	if err := json.Unmarshal(b, &s); err != nil {
+		return err
+	}
+
+	*k = ParseKind(s)
+	return nil
+}
+
+// Info provides information about a particular snapshot.
+// JSON marshallability is supported for interacting with tools like ctr.
+type Info struct {
+	Kind    Kind              // active or committed snapshot
+	Name    string            // name or key of snapshot
+	Parent  string            `json:",omitempty"` // name of parent snapshot
+	Labels  map[string]string `json:",omitempty"` // Labels for snapshot
+	Created time.Time         `json:",omitempty"` // Created time
+	Updated time.Time         `json:",omitempty"` // Last update time
+}
+
+// Usage defines statistics for disk resources consumed by the snapshot.
+//
+// These resources only include the resources consumed by the snapshot itself
+// and does not include resources usage by the parent.
+type Usage struct {
+	Inodes int64 // number of inodes in use.
+	Size   int64 // provides usage, in bytes, of snapshot
+}
+
+func (u *Usage) Add(other Usage) {
+	u.Size += other.Size
+
+	// TODO(stevvooe): assumes independent inodes, but provides an upper
+	// bound. This should be pretty close, assuming the inodes for a
+	// snapshot are roughly unique to it. Don't trust this assumption.
+	u.Inodes += other.Inodes
+}
+
+// Snapshotter defines the methods required to implement a snapshot snapshotter for
+// allocating, snapshotting and mounting filesystem changesets. The model works
+// by building up sets of changes with parent-child relationships.
+//
+// A snapshot represents a filesystem state. Every snapshot has a parent, where
+// the empty parent is represented by the empty string. A diff can be taken
+// between a parent and its snapshot to generate a classic layer.
+//
+// An active snapshot is created by calling `Prepare`. After mounting, changes
+// can be made to the snapshot. The act of committing creates a committed
+// snapshot. The committed snapshot will get the parent of active snapshot. The
+// committed snapshot can then be used as a parent. Active snapshots can never
+// act as a parent.
+//
+// Snapshots are best understood by their lifecycle. Active snapshots are
+// always created with Prepare or View. Committed snapshots are always created
+// with Commit.  Active snapshots never become committed snapshots and vice
+// versa. All snapshots may be removed.
+//
+// For consistency, we define the following terms to be used throughout this
+// interface for snapshotter implementations:
+//
+// 	`ctx` - refers to a context.Context
+// 	`key` - refers to an active snapshot
+// 	`name` - refers to a committed snapshot
+// 	`parent` - refers to the parent in relation
+//
+// Most methods take various combinations of these identifiers. Typically,
+// `name` and `parent` will be used in cases where a method *only* takes
+// committed snapshots. `key` will be used to refer to active snapshots in most
+// cases, except where noted. All variables used to access snapshots use the
+// same key space. For example, an active snapshot may not share the same key
+// with a committed snapshot.
+//
+// We cover several examples below to demonstrate the utility of a snapshot
+// snapshotter.
+//
+// Importing a Layer
+//
+// To import a layer, we simply have the Snapshotter provide a list of
+// mounts to be applied such that our dst will capture a changeset. We start
+// out by getting a path to the layer tar file and creating a temp location to
+// unpack it to:
+//
+//	layerPath, tmpDir := getLayerPath(), mkTmpDir() // just a path to layer tar file.
+//
+// We start by using a Snapshotter to Prepare a new snapshot transaction, using a
+// key and descending from the empty parent "":
+//
+//	mounts, err := snapshotter.Prepare(ctx, key, "")
+// 	if err != nil { ... }
+//
+// We get back a list of mounts from Snapshotter.Prepare, with the key identifying
+// the active snapshot. Mount this to the temporary location with the
+// following:
+//
+//	if err := mount.All(mounts, tmpDir); err != nil { ... }
+//
+// Once the mounts are performed, our temporary location is ready to capture
+// a diff. In practice, this works similar to a filesystem transaction. The
+// next step is to unpack the layer. We have a special function unpackLayer
+// that applies the contents of the layer to target location and calculates the
+// DiffID of the unpacked layer (this is a requirement for docker
+// implementation):
+//
+//	layer, err := os.Open(layerPath)
+//	if err != nil { ... }
+// 	digest, err := unpackLayer(tmpLocation, layer) // unpack into layer location
+// 	if err != nil { ... }
+//
+// When the above completes, we should have a filesystem that represents the
+// contents of the layer. Careful implementations should verify that digest
+// matches the expected DiffID. When completed, we unmount the mounts:
+//
+//	unmount(mounts) // optional, for now
+//
+// Now that we've verified and unpacked our layer, we commit the active
+// snapshot to a name. For this example, we are just going to use the layer
+// digest, but in practice, this will probably be the ChainID:
+//
+//	if err := snapshotter.Commit(ctx, digest.String(), key); err != nil { ... }
+//
+// Now, we have a layer in the Snapshotter that can be accessed with the digest
+// provided during commit. Once you have committed the snapshot, the active
+// snapshot can be removed with the following:
+//
+// 	snapshotter.Remove(ctx, key)
+//
+// Importing the Next Layer
+//
+// Making a layer depend on the above is identical to the process described
+// above except that the parent is provided as parent when calling
+// Manager.Prepare, assuming a clean, unique key identifier:
+//
+// 	mounts, err := snapshotter.Prepare(ctx, key, parentDigest)
+//
+// We then mount, apply and commit, as we did above. The new snapshot will be
+// based on the content of the previous one.
+//
+// Running a Container
+//
+// To run a container, we simply provide Snapshotter.Prepare the committed image
+// snapshot as the parent. After mounting, the prepared path can
+// be used directly as the container's filesystem:
+//
+// 	mounts, err := snapshotter.Prepare(ctx, containerKey, imageRootFSChainID)
+//
+// The returned mounts can then be passed directly to the container runtime. If
+// one would like to create a new image from the filesystem, Manager.Commit is
+// called:
+//
+// 	if err := snapshotter.Commit(ctx, newImageSnapshot, containerKey); err != nil { ... }
+//
+// Alternatively, for most container runs, Snapshotter.Remove will be called to
+// signal the Snapshotter to abandon the changes.
+type Snapshotter interface {
+	// Stat returns the info for an active or committed snapshot by name or
+	// key.
+	//
+	// Should be used for parent resolution, existence checks and to discern
+	// the kind of snapshot.
+	Stat(ctx context.Context, key string) (Info, error)
+
+	// Update updates the info for a snapshot.
+	//
+	// Only mutable properties of a snapshot may be updated.
+	Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error)
+
+	// Usage returns the resource usage of an active or committed snapshot
+	// excluding the usage of parent snapshots.
+	//
+	// The running time of this call for active snapshots is dependent on
+	// implementation, but may be proportional to the size of the resource.
+	// Callers should take this into consideration. Implementations should
+	// attempt to honor context cancellation and avoid taking locks when making
+	// the calculation.
+	Usage(ctx context.Context, key string) (Usage, error)
+
+	// Mounts returns the mounts for the active snapshot transaction identified
+	// by key. Can be called on a read-write or readonly transaction. This is
+	// available only for active snapshots.
+	//
+	// This can be used to recover mounts after calling View or Prepare.
+	Mounts(ctx context.Context, key string) ([]mount.Mount, error)
+
+	// Prepare creates an active snapshot identified by key descending from the
+	// provided parent.  The returned mounts can be used to mount the snapshot
+	// to capture changes.
+	//
+	// If a parent is provided, after performing the mounts, the destination
+	// will start with the content of the parent. The parent must be a
+	// committed snapshot. Changes to the mounted destination will be captured
+	// in relation to the parent. The default parent, "", is an empty
+	// directory.
+	//
+	// The changes may be saved to a committed snapshot by calling Commit. When
+	// one is done with the transaction, Remove should be called on the key.
+	//
+	// Multiple calls to Prepare or View with the same key should fail.
+	Prepare(ctx context.Context, key, parent string, opts ...Opt) ([]mount.Mount, error)
+
+	// View behaves identically to Prepare except the result may not be
+	// committed back to the snapshot snapshotter. View returns a readonly view on
+	// the parent, with the active snapshot being tracked by the given key.
+	//
+	// This method operates identically to Prepare, except that Mounts returned
+	// may have the readonly flag set. Any modifications to the underlying
+	// filesystem will be ignored. Implementations may perform this in a more
+	// efficient manner that differs from what would be attempted with
+	// `Prepare`.
+	//
+	// Commit may not be called on the provided key and will return an error.
+	// To collect the resources associated with key, Remove must be called with
+	// key as the argument.
+	View(ctx context.Context, key, parent string, opts ...Opt) ([]mount.Mount, error)
+
+	// Commit captures the changes between key and its parent into a snapshot
+	// identified by name.  The name can then be used with the snapshotter's other
+	// methods to create subsequent snapshots.
+	//
+	// A committed snapshot will be created under name with the parent of the
+	// active snapshot.
+	//
+	// Commit may be called multiple times on the same key. Snapshots created
+	// in this manner will all reference the parent used to start the
+	// transaction.
+	Commit(ctx context.Context, name, key string, opts ...Opt) error
+
+	// Remove the committed or active snapshot by the provided key.
+	//
+	// All resources associated with the key will be removed.
+	//
+	// If the snapshot is a parent of another snapshot, its children must be
+	// removed before proceeding.
+	Remove(ctx context.Context, key string) error
+
+	// Walk all snapshots in the snapshotter. For each snapshot in the
+	// snapshotter, the function will be called.
+	Walk(ctx context.Context, fn func(context.Context, Info) error) error
+}
+
+// Opt allows setting mutable snapshot properties on creation
+type Opt func(info *Info) error
+
+// WithLabels adds labels to a created snapshot
+func WithLabels(labels map[string]string) Opt {
+	return func(info *Info) error {
+		info.Labels = labels
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/snapshotter_default_linux.go b/vendor/github.com/containerd/containerd/snapshotter_default_linux.go
new file mode 100644
index 0000000..c432a2d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/snapshotter_default_linux.go
@@ -0,0 +1,8 @@
+package containerd
+
+const (
+	// DefaultSnapshotter will set the default snapshotter for the platform.
+	// This will be based on the client compilation target, so take that into
+	// account when choosing this value.
+	DefaultSnapshotter = "overlayfs"
+)
diff --git a/vendor/github.com/containerd/containerd/snapshotter_default_unix.go b/vendor/github.com/containerd/containerd/snapshotter_default_unix.go
new file mode 100644
index 0000000..cb8b08a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/snapshotter_default_unix.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd solaris
+
+package containerd
+
+const (
+	// DefaultSnapshotter will set the default snapshotter for the platform.
+	// This will be based on the client compilation target, so take that into
+	// account when choosing this value.
+	DefaultSnapshotter = "naive"
+)
diff --git a/vendor/github.com/containerd/containerd/snapshotter_default_windows.go b/vendor/github.com/containerd/containerd/snapshotter_default_windows.go
new file mode 100644
index 0000000..3b73582
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/snapshotter_default_windows.go
@@ -0,0 +1,8 @@
+package containerd
+
+const (
+	// DefaultSnapshotter will set the default snapshotter for the platform.
+	// This will be based on the client compilation target, so take that into
+	// account when choosing this value.
+	DefaultSnapshotter = "windows"
+)
diff --git a/vendor/github.com/containerd/containerd/spec.go b/vendor/github.com/containerd/containerd/spec.go
new file mode 100644
index 0000000..850f470
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/spec.go
@@ -0,0 +1,23 @@
+package containerd
+
+import (
+	"context"
+
+	"github.com/containerd/containerd/containers"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// GenerateSpec will generate a default spec from the provided image
+// for use as a containerd container
+func GenerateSpec(ctx context.Context, client *Client, c *containers.Container, opts ...SpecOpts) (*specs.Spec, error) {
+	s, err := createDefaultSpec(ctx, c.ID)
+	if err != nil {
+		return nil, err
+	}
+	for _, o := range opts {
+		if err := o(ctx, client, c, s); err != nil {
+			return nil, err
+		}
+	}
+	return s, nil
+}
diff --git a/vendor/github.com/containerd/containerd/spec_opts.go b/vendor/github.com/containerd/containerd/spec_opts.go
new file mode 100644
index 0000000..2dbf821
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/spec_opts.go
@@ -0,0 +1,74 @@
+package containerd
+
+import (
+	"context"
+
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/typeurl"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// SpecOpts sets spec specific information to a newly generated OCI spec
+type SpecOpts func(context.Context, *Client, *containers.Container, *specs.Spec) error
+
+// WithProcessArgs replaces the args on the generated spec
+func WithProcessArgs(args ...string) SpecOpts {
+	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+		s.Process.Args = args
+		return nil
+	}
+}
+
+// WithProcessCwd replaces the current working directory on the generated spec
+func WithProcessCwd(cwd string) SpecOpts {
+	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+		s.Process.Cwd = cwd
+		return nil
+	}
+}
+
+// WithHostname sets the container's hostname
+func WithHostname(name string) SpecOpts {
+	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+		s.Hostname = name
+		return nil
+	}
+}
+
+// WithNewSpec generates a new spec for a new container
+func WithNewSpec(opts ...SpecOpts) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		s, err := createDefaultSpec(ctx, c.ID)
+		if err != nil {
+			return err
+		}
+		for _, o := range opts {
+			if err := o(ctx, client, c, s); err != nil {
+				return err
+			}
+		}
+		any, err := typeurl.MarshalAny(s)
+		if err != nil {
+			return err
+		}
+		c.Spec = any
+		return nil
+	}
+}
+
+// WithSpec sets the provided spec on the container
+func WithSpec(s *specs.Spec, opts ...SpecOpts) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		for _, o := range opts {
+			if err := o(ctx, client, c, s); err != nil {
+				return err
+			}
+		}
+		any, err := typeurl.MarshalAny(s)
+		if err != nil {
+			return err
+		}
+		c.Spec = any
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/spec_opts_unix.go b/vendor/github.com/containerd/containerd/spec_opts_unix.go
new file mode 100644
index 0000000..7009522
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/spec_opts_unix.go
@@ -0,0 +1,437 @@
+// +build !windows
+
+package containerd
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"time"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/fs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/namespaces"
+	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/containerd/snapshot"
+	"github.com/opencontainers/image-spec/identity"
+	"github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/opencontainers/runc/libcontainer/user"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+// WithTTY sets the information on the spec as well as the environment variables for
+// using a TTY
+func WithTTY(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	s.Process.Terminal = true
+	s.Process.Env = append(s.Process.Env, "TERM=xterm")
+	return nil
+}
+
+// WithHostNamespace allows a task to run inside the host's linux namespace
+func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts {
+	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+		for i, n := range s.Linux.Namespaces {
+			if n.Type == ns {
+				s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...)
+				return nil
+			}
+		}
+		return nil
+	}
+}
+
+// WithLinuxNamespace uses the passed in namespace for the spec. If a namespace of the same type already exists in the
+// spec, the existing namespace is replaced by the one provided.
+func WithLinuxNamespace(ns specs.LinuxNamespace) SpecOpts {
+	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+		for i, n := range s.Linux.Namespaces {
+			if n.Type == ns.Type {
+				before := s.Linux.Namespaces[:i]
+				after := s.Linux.Namespaces[i+1:]
+				s.Linux.Namespaces = append(before, ns)
+				s.Linux.Namespaces = append(s.Linux.Namespaces, after...)
+				return nil
+			}
+		}
+		s.Linux.Namespaces = append(s.Linux.Namespaces, ns)
+		return nil
+	}
+}
+
+// WithImageConfig configures the spec based on the configuration of an Image
+func WithImageConfig(i Image) SpecOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container, s *specs.Spec) error {
+		var (
+			image = i.(*image)
+			store = client.ContentStore()
+		)
+		ic, err := image.i.Config(ctx, store, platforms.Default())
+		if err != nil {
+			return err
+		}
+		var (
+			ociimage v1.Image
+			config   v1.ImageConfig
+		)
+		switch ic.MediaType {
+		case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
+			p, err := content.ReadBlob(ctx, store, ic.Digest)
+			if err != nil {
+				return err
+			}
+
+			if err := json.Unmarshal(p, &ociimage); err != nil {
+				return err
+			}
+			config = ociimage.Config
+		default:
+			return fmt.Errorf("unknown image config media type %s", ic.MediaType)
+		}
+		s.Process.Env = append(s.Process.Env, config.Env...)
+		cmd := config.Cmd
+		s.Process.Args = append(config.Entrypoint, cmd...)
+		if config.User != "" {
+			parts := strings.Split(config.User, ":")
+			switch len(parts) {
+			case 1:
+				v, err := strconv.ParseUint(parts[0], 0, 10)
+				if err != nil {
+					// if we cannot parse as a uint then try to see if it is a username
+					if err := WithUsername(config.User)(ctx, client, c, s); err != nil {
+						return err
+					}
+					return err
+				}
+				if err := WithUserID(uint32(v))(ctx, client, c, s); err != nil {
+					return err
+				}
+			case 2:
+				v, err := strconv.ParseUint(parts[0], 0, 10)
+				if err != nil {
+					return err
+				}
+				uid := uint32(v)
+				if v, err = strconv.ParseUint(parts[1], 0, 10); err != nil {
+					return err
+				}
+				gid := uint32(v)
+				s.Process.User.UID, s.Process.User.GID = uid, gid
+			default:
+				return fmt.Errorf("invalid USER value %s", config.User)
+			}
+		}
+		cwd := config.WorkingDir
+		if cwd == "" {
+			cwd = "/"
+		}
+		s.Process.Cwd = cwd
+		return nil
+	}
+}
+
+// WithRootFSPath specifies unmanaged rootfs path.
+func WithRootFSPath(path string) SpecOpts {
+	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+		if s.Root == nil {
+			s.Root = &specs.Root{}
+		}
+		s.Root.Path = path
+		// Entrypoint is not set here (it's up to caller)
+		return nil
+	}
+}
+
+// WithRootFSReadonly sets specs.Root.Readonly to true
+func WithRootFSReadonly() SpecOpts {
+	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+		if s.Root == nil {
+			s.Root = &specs.Root{}
+		}
+		s.Root.Readonly = true
+		return nil
+	}
+}
+
+// WithResources sets the provided resources on the spec for task updates
+func WithResources(resources *specs.LinuxResources) UpdateTaskOpts {
+	return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error {
+		r.Resources = resources
+		return nil
+	}
+}
+
+// WithNoNewPrivileges sets no_new_privileges on the process for the container
+func WithNoNewPrivileges(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	s.Process.NoNewPrivileges = true
+	return nil
+}
+
+// WithHostHostsFile bind-mounts the host's /etc/hosts into the container as readonly
+func WithHostHostsFile(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	s.Mounts = append(s.Mounts, specs.Mount{
+		Destination: "/etc/hosts",
+		Type:        "bind",
+		Source:      "/etc/hosts",
+		Options:     []string{"rbind", "ro"},
+	})
+	return nil
+}
+
+// WithHostResolvconf bind-mounts the host's /etc/resolv.conf into the container as readonly
+func WithHostResolvconf(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	s.Mounts = append(s.Mounts, specs.Mount{
+		Destination: "/etc/resolv.conf",
+		Type:        "bind",
+		Source:      "/etc/resolv.conf",
+		Options:     []string{"rbind", "ro"},
+	})
+	return nil
+}
+
+// WithHostLocaltime bind-mounts the host's /etc/localtime into the container as readonly
+func WithHostLocaltime(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+	s.Mounts = append(s.Mounts, specs.Mount{
+		Destination: "/etc/localtime",
+		Type:        "bind",
+		Source:      "/etc/localtime",
+		Options:     []string{"rbind", "ro"},
+	})
+	return nil
+}
+
+// WithUserNamespace sets the uid and gid mappings for the task
+// this can be called multiple times to add more mappings to the generated spec
+func WithUserNamespace(container, host, size uint32) SpecOpts {
+	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+		var hasUserns bool
+		for _, ns := range s.Linux.Namespaces {
+			if ns.Type == specs.UserNamespace {
+				hasUserns = true
+				break
+			}
+		}
+		if !hasUserns {
+			s.Linux.Namespaces = append(s.Linux.Namespaces, specs.LinuxNamespace{
+				Type: specs.UserNamespace,
+			})
+		}
+		mapping := specs.LinuxIDMapping{
+			ContainerID: container,
+			HostID:      host,
+			Size:        size,
+		}
+		s.Linux.UIDMappings = append(s.Linux.UIDMappings, mapping)
+		s.Linux.GIDMappings = append(s.Linux.GIDMappings, mapping)
+		return nil
+	}
+}
+
+// WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the
+// filesystem to be used by a container with user namespaces
+func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts {
+	return withRemappedSnapshotBase(id, i, uid, gid, false)
+}
+
+// WithRemappedSnapshotView is similar to WithRemappedSnapshot but rootfs is mounted as read-only.
+func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerOpts {
+	return withRemappedSnapshotBase(id, i, uid, gid, true)
+}
+
+func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container) error {
+		diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Default())
+		if err != nil {
+			return err
+		}
+
+		setSnapshotterIfEmpty(c)
+
+		var (
+			snapshotter = client.SnapshotService(c.Snapshotter)
+			parent      = identity.ChainID(diffIDs).String()
+			usernsID    = fmt.Sprintf("%s-%d-%d", parent, uid, gid)
+			opt         = snapshot.WithLabels(map[string]string{
+				"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
+			})
+		)
+		if _, err := snapshotter.Stat(ctx, usernsID); err == nil {
+			if _, err := snapshotter.Prepare(ctx, id, usernsID, opt); err != nil {
+				return err
+			}
+			c.SnapshotKey = id
+			c.Image = i.Name()
+			return nil
+		}
+		mounts, err := snapshotter.Prepare(ctx, usernsID+"-remap", parent, opt)
+		if err != nil {
+			return err
+		}
+		if err := remapRootFS(mounts, uid, gid); err != nil {
+			snapshotter.Remove(ctx, usernsID)
+			return err
+		}
+		if err := snapshotter.Commit(ctx, usernsID, usernsID+"-remap", opt); err != nil {
+			return err
+		}
+		if readonly {
+			_, err = snapshotter.View(ctx, id, usernsID, opt)
+		} else {
+			_, err = snapshotter.Prepare(ctx, id, usernsID, opt)
+		}
+		if err != nil {
+			return err
+		}
+		c.SnapshotKey = id
+		c.Image = i.Name()
+		return nil
+	}
+}
+
+// WithCgroup sets the container's cgroup path
+func WithCgroup(path string) SpecOpts {
+	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+		s.Linux.CgroupsPath = path
+		return nil
+	}
+}
+
+// WithNamespacedCgroup uses the namespace set on the context to create a
+// root directory for containers in the cgroup with the id as the subcgroup
+func WithNamespacedCgroup() SpecOpts {
+	return func(ctx context.Context, _ *Client, c *containers.Container, s *specs.Spec) error {
+		namespace, err := namespaces.NamespaceRequired(ctx)
+		if err != nil {
+			return err
+		}
+		s.Linux.CgroupsPath = filepath.Join("/", namespace, c.ID)
+		return nil
+	}
+}
+
+// WithUIDGID allows the UID and GID for the Process to be set
+func WithUIDGID(uid, gid uint32) SpecOpts {
+	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+		s.Process.User.UID = uid
+		s.Process.User.GID = gid
+		return nil
+	}
+}
+
+// WithUserID sets the correct UID and GID for the container based
+// on the image's /etc/passwd contents. If /etc/passwd does not exist,
+// or uid is not found in /etc/passwd, it sets gid to be the same as
+// uid, and does not return an error.
+func WithUserID(uid uint32) SpecOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container, s *specs.Spec) error {
+		if c.Snapshotter == "" {
+			return errors.Errorf("no snapshotter set for container")
+		}
+		if c.SnapshotKey == "" {
+			return errors.Errorf("rootfs snapshot not created for container")
+		}
+		snapshotter := client.SnapshotService(c.Snapshotter)
+		mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey)
+		if err != nil {
+			return err
+		}
+		root, err := ioutil.TempDir("", "ctd-username")
+		if err != nil {
+			return err
+		}
+		defer os.RemoveAll(root)
+		for _, m := range mounts {
+			if err := m.Mount(root); err != nil {
+				return err
+			}
+		}
+		defer unix.Unmount(root, 0)
+		ppath, err := fs.RootPath(root, "/etc/passwd")
+		if err != nil {
+			return err
+		}
+		f, err := os.Open(ppath)
+		if err != nil {
+			if os.IsNotExist(err) {
+				s.Process.User.UID, s.Process.User.GID = uid, uid
+				return nil
+			}
+			return err
+		}
+		defer f.Close()
+		users, err := user.ParsePasswdFilter(f, func(u user.User) bool {
+			return u.Uid == int(uid)
+		})
+		if err != nil {
+			return err
+		}
+		if len(users) == 0 {
+			s.Process.User.UID, s.Process.User.GID = uid, uid
+			return nil
+		}
+		u := users[0]
+		s.Process.User.UID, s.Process.User.GID = uint32(u.Uid), uint32(u.Gid)
+		return nil
+	}
+}
+
+// WithUsername sets the correct UID and GID for the container
+// based on the image's /etc/passwd contents. If /etc/passwd
+// does not exist, or the username is not found in /etc/passwd,
+// it returns error.
+func WithUsername(username string) SpecOpts {
+	return func(ctx context.Context, client *Client, c *containers.Container, s *specs.Spec) error {
+		if c.Snapshotter == "" {
+			return errors.Errorf("no snapshotter set for container")
+		}
+		if c.SnapshotKey == "" {
+			return errors.Errorf("rootfs snapshot not created for container")
+		}
+		snapshotter := client.SnapshotService(c.Snapshotter)
+		mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey)
+		if err != nil {
+			return err
+		}
+		root, err := ioutil.TempDir("", "ctd-username")
+		if err != nil {
+			return err
+		}
+		defer os.RemoveAll(root)
+		for _, m := range mounts {
+			if err := m.Mount(root); err != nil {
+				return err
+			}
+		}
+		defer unix.Unmount(root, 0)
+		ppath, err := fs.RootPath(root, "/etc/passwd")
+		if err != nil {
+			return err
+		}
+		f, err := os.Open(ppath)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+		users, err := user.ParsePasswdFilter(f, func(u user.User) bool {
+			return u.Name == username
+		})
+		if err != nil {
+			return err
+		}
+		if len(users) == 0 {
+			return errors.Errorf("no users found for %s", username)
+		}
+		u := users[0]
+		s.Process.User.UID, s.Process.User.GID = uint32(u.Uid), uint32(u.Gid)
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/spec_opts_windows.go b/vendor/github.com/containerd/containerd/spec_opts_windows.go
new file mode 100644
index 0000000..5aa5c30
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/spec_opts_windows.go
@@ -0,0 +1,71 @@
+// +build windows
+
+package containerd
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/containerd/containerd/containers"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	"github.com/opencontainers/image-spec/specs-go/v1"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func WithImageConfig(i Image) SpecOpts {
+	return func(ctx context.Context, client *Client, _ *containers.Container, s *specs.Spec) error {
+		var (
+			image = i.(*image)
+			store = client.ContentStore()
+		)
+		ic, err := image.i.Config(ctx, store, platforms.Default())
+		if err != nil {
+			return err
+		}
+		var (
+			ociimage v1.Image
+			config   v1.ImageConfig
+		)
+		switch ic.MediaType {
+		case v1.MediaTypeImageConfig, images.MediaTypeDockerSchema2Config:
+			p, err := content.ReadBlob(ctx, store, ic.Digest)
+			if err != nil {
+				return err
+			}
+			if err := json.Unmarshal(p, &ociimage); err != nil {
+				return err
+			}
+			config = ociimage.Config
+		default:
+			return fmt.Errorf("unknown image config media type %s", ic.MediaType)
+		}
+		s.Process.Env = config.Env
+		s.Process.Args = append(config.Entrypoint, config.Cmd...)
+		s.Process.User = specs.User{
+			Username: config.User,
+		}
+		return nil
+	}
+}
+
+func WithTTY(width, height int) SpecOpts {
+	return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
+		s.Process.Terminal = true
+		if s.Process.ConsoleSize == nil {
+			s.Process.ConsoleSize = &specs.Box{}
+		}
+		s.Process.ConsoleSize.Width = uint(width)
+		s.Process.ConsoleSize.Height = uint(height)
+		return nil
+	}
+}
+
+func WithResources(resources *specs.WindowsResources) UpdateTaskOpts {
+	return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error {
+		r.Resources = resources
+		return nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/spec_unix.go b/vendor/github.com/containerd/containerd/spec_unix.go
new file mode 100644
index 0000000..9a0b537
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/spec_unix.go
@@ -0,0 +1,205 @@
+// +build !windows
+
+package containerd
+
+import (
+	"context"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"syscall"
+
+	"golang.org/x/sys/unix"
+
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/namespaces"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+const (
+	rwm               = "rwm"
+	defaultRootfsPath = "rootfs"
+)
+
+var (
+	defaultEnv = []string{
+		"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+	}
+)
+
+func defaultCaps() []string {
+	return []string{
+		"CAP_CHOWN",
+		"CAP_DAC_OVERRIDE",
+		"CAP_FSETID",
+		"CAP_FOWNER",
+		"CAP_MKNOD",
+		"CAP_NET_RAW",
+		"CAP_SETGID",
+		"CAP_SETUID",
+		"CAP_SETFCAP",
+		"CAP_SETPCAP",
+		"CAP_NET_BIND_SERVICE",
+		"CAP_SYS_CHROOT",
+		"CAP_KILL",
+		"CAP_AUDIT_WRITE",
+	}
+}
+
+func defaultNamespaces() []specs.LinuxNamespace {
+	return []specs.LinuxNamespace{
+		{
+			Type: specs.PIDNamespace,
+		},
+		{
+			Type: specs.IPCNamespace,
+		},
+		{
+			Type: specs.UTSNamespace,
+		},
+		{
+			Type: specs.MountNamespace,
+		},
+		{
+			Type: specs.NetworkNamespace,
+		},
+	}
+}
+
+func createDefaultSpec(ctx context.Context, id string) (*specs.Spec, error) {
+	ns, err := namespaces.NamespaceRequired(ctx)
+	if err != nil {
+		return nil, err
+	}
+	s := &specs.Spec{
+		Version: specs.Version,
+		Root: &specs.Root{
+			Path: defaultRootfsPath,
+		},
+		Process: &specs.Process{
+			Env:             defaultEnv,
+			Cwd:             "/",
+			NoNewPrivileges: true,
+			User: specs.User{
+				UID: 0,
+				GID: 0,
+			},
+			Capabilities: &specs.LinuxCapabilities{
+				Bounding:    defaultCaps(),
+				Permitted:   defaultCaps(),
+				Inheritable: defaultCaps(),
+				Effective:   defaultCaps(),
+			},
+			Rlimits: []specs.POSIXRlimit{
+				{
+					Type: "RLIMIT_NOFILE",
+					Hard: uint64(1024),
+					Soft: uint64(1024),
+				},
+			},
+		},
+		Mounts: []specs.Mount{
+			{
+				Destination: "/proc",
+				Type:        "proc",
+				Source:      "proc",
+			},
+			{
+				Destination: "/dev",
+				Type:        "tmpfs",
+				Source:      "tmpfs",
+				Options:     []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
+			},
+			{
+				Destination: "/dev/pts",
+				Type:        "devpts",
+				Source:      "devpts",
+				Options:     []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"},
+			},
+			{
+				Destination: "/dev/shm",
+				Type:        "tmpfs",
+				Source:      "shm",
+				Options:     []string{"nosuid", "noexec", "nodev", "mode=1777", "size=65536k"},
+			},
+			{
+				Destination: "/dev/mqueue",
+				Type:        "mqueue",
+				Source:      "mqueue",
+				Options:     []string{"nosuid", "noexec", "nodev"},
+			},
+			{
+				Destination: "/sys",
+				Type:        "sysfs",
+				Source:      "sysfs",
+				Options:     []string{"nosuid", "noexec", "nodev", "ro"},
+			},
+			{
+				Destination: "/run",
+				Type:        "tmpfs",
+				Source:      "tmpfs",
+				Options:     []string{"nosuid", "strictatime", "mode=755", "size=65536k"},
+			},
+		},
+		Linux: &specs.Linux{
+			// TODO (AkihiroSuda): unmask /sys/firmware on Windows daemon for LCOW support?
+			// https://github.com/moby/moby/pull/33241/files#diff-a1f5051ce84e711a2ee688ab9ded5e74R215
+			MaskedPaths: []string{
+				"/proc/kcore",
+				"/proc/latency_stats",
+				"/proc/timer_list",
+				"/proc/timer_stats",
+				"/proc/sched_debug",
+				"/sys/firmware",
+			},
+			ReadonlyPaths: []string{
+				"/proc/asound",
+				"/proc/bus",
+				"/proc/fs",
+				"/proc/irq",
+				"/proc/sys",
+				"/proc/sysrq-trigger",
+			},
+			CgroupsPath: filepath.Join("/", ns, id),
+			Resources: &specs.LinuxResources{
+				Devices: []specs.LinuxDeviceCgroup{
+					{
+						Allow:  false,
+						Access: rwm,
+					},
+				},
+			},
+			Namespaces: defaultNamespaces(),
+		},
+	}
+	return s, nil
+}
+
+func remapRootFS(mounts []mount.Mount, uid, gid uint32) error {
+	root, err := ioutil.TempDir("", "ctd-remap")
+	if err != nil {
+		return err
+	}
+	defer os.RemoveAll(root)
+	for _, m := range mounts {
+		if err := m.Mount(root); err != nil {
+			return err
+		}
+	}
+	defer unix.Unmount(root, 0)
+	return filepath.Walk(root, incrementFS(root, uid, gid))
+}
+
+func incrementFS(root string, uidInc, gidInc uint32) filepath.WalkFunc {
+	return func(path string, info os.FileInfo, err error) error {
+		if err != nil {
+			return err
+		}
+		var (
+			stat = info.Sys().(*syscall.Stat_t)
+			u, g = int(stat.Uid + uidInc), int(stat.Gid + gidInc)
+		)
+		// be sure to lchown the path so as to not de-reference the symlink to a host file
+		return os.Lchown(path, u, g)
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/spec_windows.go b/vendor/github.com/containerd/containerd/spec_windows.go
new file mode 100644
index 0000000..16a58b4
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/spec_windows.go
@@ -0,0 +1,27 @@
+package containerd
+
+import (
+	"context"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+func createDefaultSpec(ctx context.Context, id string) (*specs.Spec, error) {
+	return &specs.Spec{
+		Version: specs.Version,
+		Root:    &specs.Root{},
+		Process: &specs.Process{
+			Cwd: `C:\`,
+			ConsoleSize: &specs.Box{
+				Width:  80,
+				Height: 20,
+			},
+		},
+		Windows: &specs.Windows{
+			IgnoreFlushesDuringBoot: true,
+			Network: &specs.WindowsNetwork{
+				AllowUnqualifiedDNSQuery: true,
+			},
+		},
+	}, nil
+}
diff --git a/vendor/github.com/containerd/containerd/specs/spec_linux.go b/vendor/github.com/containerd/containerd/specs/spec_linux.go
deleted file mode 100644
index d415c1d..0000000
--- a/vendor/github.com/containerd/containerd/specs/spec_linux.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package specs
-
-import oci "github.com/opencontainers/runtime-spec/specs-go"
-
-type (
-	// ProcessSpec aliases the platform process specs
-	ProcessSpec oci.Process
-	// Spec aliases the platform oci spec
-	Spec oci.Spec
-	// Rlimit aliases the platform resource limit
-	Rlimit oci.POSIXRlimit
-)
diff --git a/vendor/github.com/containerd/containerd/specs/spec_solaris.go b/vendor/github.com/containerd/containerd/specs/spec_solaris.go
deleted file mode 100644
index 1b60d3d..0000000
--- a/vendor/github.com/containerd/containerd/specs/spec_solaris.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package specs
-
-import ocs "github.com/opencontainers/runtime-spec/specs-go"
-
-type (
-	// ProcessSpec aliases the platform process specs
-	ProcessSpec ocs.Process
-	// Spec aliases the platform oci spec
-	Spec ocs.Spec
-)
diff --git a/vendor/github.com/containerd/containerd/sys/epoll.go b/vendor/github.com/containerd/containerd/sys/epoll.go
new file mode 100644
index 0000000..3a4d97c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/epoll.go
@@ -0,0 +1,20 @@
+// +build linux
+
+package sys
+
+import "golang.org/x/sys/unix"
+
+// EpollCreate1 directly calls unix.EpollCreate1
+func EpollCreate1(flag int) (int, error) {
+	return unix.EpollCreate1(flag)
+}
+
+// EpollCtl directly calls unix.EpollCtl
+func EpollCtl(epfd int, op int, fd int, event *unix.EpollEvent) error {
+	return unix.EpollCtl(epfd, op, fd, event)
+}
+
+// EpollWait directly calls unix.EpollWait
+func EpollWait(epfd int, events []unix.EpollEvent, msec int) (int, error) {
+	return unix.EpollWait(epfd, events, msec)
+}
diff --git a/vendor/github.com/containerd/containerd/sys/fds.go b/vendor/github.com/containerd/containerd/sys/fds.go
new file mode 100644
index 0000000..3c1ec67
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/fds.go
@@ -0,0 +1,18 @@
+// +build !windows,!darwin
+
+package sys
+
+import (
+	"io/ioutil"
+	"path/filepath"
+	"strconv"
+)
+
+// GetOpenFds returns the number of open fds for the process provided by pid
+func GetOpenFds(pid int) (int, error) {
+	dirs, err := ioutil.ReadDir(filepath.Join("/proc", strconv.Itoa(pid), "fd"))
+	if err != nil {
+		return -1, err
+	}
+	return len(dirs), nil
+}
diff --git a/vendor/github.com/containerd/containerd/sys/filesys_windows.go b/vendor/github.com/containerd/containerd/sys/filesys_windows.go
new file mode 100644
index 0000000..b5ce135
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/filesys_windows.go
@@ -0,0 +1,236 @@
+// +build windows
+
+package sys
+
+import (
+	"os"
+	"path/filepath"
+	"regexp"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	winio "github.com/Microsoft/go-winio"
+)
+
+// MkdirAllWithACL is a wrapper for MkdirAll that creates a directory
+// ACL'd for Builtin Administrators and Local System.
+func MkdirAllWithACL(path string, perm os.FileMode) error {
+	return mkdirall(path, true)
+}
+
+// MkdirAll implementation that is volume path aware for Windows.
+func MkdirAll(path string, _ os.FileMode) error {
+	return mkdirall(path, false)
+}
+
+// mkdirall is a custom version of os.MkdirAll modified for use on Windows
+// so that it is both volume path aware, and can create a directory with
+// a DACL.
+func mkdirall(path string, adminAndLocalSystem bool) error {
+	if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
+		return nil
+	}
+
+	// The rest of this method is largely copied from os.MkdirAll and should be kept
+	// as-is to ensure compatibility.
+
+	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
+	dir, err := os.Stat(path)
+	if err == nil {
+		if dir.IsDir() {
+			return nil
+		}
+		return &os.PathError{
+			Op:   "mkdir",
+			Path: path,
+			Err:  syscall.ENOTDIR,
+		}
+	}
+
+	// Slow path: make sure parent exists and then call Mkdir for path.
+	i := len(path)
+	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
+		i--
+	}
+
+	j := i
+	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
+		j--
+	}
+
+	if j > 1 {
+		// Create parent
+		err = mkdirall(path[0:j-1], false)
+		if err != nil {
+			return err
+		}
+	}
+
+	// Parent now exists; invoke os.Mkdir or mkdirWithACL and use its result.
+	if adminAndLocalSystem {
+		err = mkdirWithACL(path)
+	} else {
+		err = os.Mkdir(path, 0)
+	}
+
+	if err != nil {
+		// Handle arguments like "foo/." by
+		// double-checking that directory doesn't exist.
+		dir, err1 := os.Lstat(path)
+		if err1 == nil && dir.IsDir() {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
+
+// mkdirWithACL creates a new directory. If there is an error, it will be of
+// type *PathError.
+//
+// This is a modified and combined version of os.Mkdir and syscall.Mkdir
+// in golang to cater for creating a directory with an ACL permitting full
+// access, with inheritance, to any subfolder/file for Built-in Administrators
+// and Local System.
+func mkdirWithACL(name string) error {
+	sa := syscall.SecurityAttributes{Length: 0}
+	sddl := "D:P(A;OICI;GA;;;BA)(A;OICI;GA;;;SY)"
+	sd, err := winio.SddlToSecurityDescriptor(sddl)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+	sa.Length = uint32(unsafe.Sizeof(sa))
+	sa.InheritHandle = 1
+	sa.SecurityDescriptor = uintptr(unsafe.Pointer(&sd[0]))
+
+	namep, err := syscall.UTF16PtrFromString(name)
+	if err != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: err}
+	}
+
+	e := syscall.CreateDirectory(namep, &sa)
+	if e != nil {
+		return &os.PathError{Op: "mkdir", Path: name, Err: e}
+	}
+	return nil
+}
+
+// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
+// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
+// as it doesn't start with a drive-letter/colon combination. However, in
+// docker we need to verify things such as WORKDIR /windows/system32 in
+// a Dockerfile (which gets translated to \windows\system32 when being processed
+// by the daemon. This SHOULD be treated as absolute from a docker processing
+// perspective.
+func IsAbs(path string) bool {
+	if !filepath.IsAbs(path) {
+		if !strings.HasPrefix(path, string(os.PathSeparator)) {
+			return false
+		}
+	}
+	return true
+}
+
+// The origin of the functions below here are the golang OS and syscall packages,
+// slightly modified to only cope with files, not directories due to the
+// specific use case.
+//
+// The alteration is to allow a file on Windows to be opened with
+// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating
+// the standby list, particularly when accessing large files such as layer.tar.
+
+// CreateSequential creates the named file with mode 0666 (before umask), truncating
+// it if it already exists. If successful, methods on the returned
+// File can be used for I/O; the associated file descriptor has mode
+// O_RDWR.
+// If there is an error, it will be of type *PathError.
+func CreateSequential(name string) (*os.File, error) {
+	return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
+}
+
+// OpenSequential opens the named file for reading. If successful, methods on
+// the returned file can be used for reading; the associated file
+// descriptor has mode O_RDONLY.
+// If there is an error, it will be of type *PathError.
+func OpenSequential(name string) (*os.File, error) {
+	return OpenFileSequential(name, os.O_RDONLY, 0)
+}
+
+// OpenFileSequential is the generalized open call; most users will use Open
+// or Create instead.
+// If there is an error, it will be of type *PathError.
+func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
+	if name == "" {
+		return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
+	}
+	r, errf := syscallOpenFileSequential(name, flag, 0)
+	if errf == nil {
+		return r, nil
+	}
+	return nil, &os.PathError{Op: "open", Path: name, Err: errf}
+}
+
+func syscallOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
+	r, e := syscallOpenSequential(name, flag|syscall.O_CLOEXEC, 0)
+	if e != nil {
+		return nil, e
+	}
+	return os.NewFile(uintptr(r), name), nil
+}
+
+func makeInheritSa() *syscall.SecurityAttributes {
+	var sa syscall.SecurityAttributes
+	sa.Length = uint32(unsafe.Sizeof(sa))
+	sa.InheritHandle = 1
+	return &sa
+}
+
+func syscallOpenSequential(path string, mode int, _ uint32) (fd syscall.Handle, err error) {
+	if len(path) == 0 {
+		return syscall.InvalidHandle, syscall.ERROR_FILE_NOT_FOUND
+	}
+	pathp, err := syscall.UTF16PtrFromString(path)
+	if err != nil {
+		return syscall.InvalidHandle, err
+	}
+	var access uint32
+	switch mode & (syscall.O_RDONLY | syscall.O_WRONLY | syscall.O_RDWR) {
+	case syscall.O_RDONLY:
+		access = syscall.GENERIC_READ
+	case syscall.O_WRONLY:
+		access = syscall.GENERIC_WRITE
+	case syscall.O_RDWR:
+		access = syscall.GENERIC_READ | syscall.GENERIC_WRITE
+	}
+	if mode&syscall.O_CREAT != 0 {
+		access |= syscall.GENERIC_WRITE
+	}
+	if mode&syscall.O_APPEND != 0 {
+		access &^= syscall.GENERIC_WRITE
+		access |= syscall.FILE_APPEND_DATA
+	}
+	sharemode := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE)
+	var sa *syscall.SecurityAttributes
+	if mode&syscall.O_CLOEXEC == 0 {
+		sa = makeInheritSa()
+	}
+	var createmode uint32
+	switch {
+	case mode&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):
+		createmode = syscall.CREATE_NEW
+	case mode&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):
+		createmode = syscall.CREATE_ALWAYS
+	case mode&syscall.O_CREAT == syscall.O_CREAT:
+		createmode = syscall.OPEN_ALWAYS
+	case mode&syscall.O_TRUNC == syscall.O_TRUNC:
+		createmode = syscall.TRUNCATE_EXISTING
+	default:
+		createmode = syscall.OPEN_EXISTING
+	}
+	// Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
+	//https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
+	const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
+	h, e := syscall.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
+	return h, e
+}
diff --git a/vendor/github.com/containerd/containerd/sys/oom_unix.go b/vendor/github.com/containerd/containerd/sys/oom_unix.go
new file mode 100644
index 0000000..23fcc94
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/oom_unix.go
@@ -0,0 +1,31 @@
+// +build !windows
+
+package sys
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+
+	"github.com/opencontainers/runc/libcontainer/system"
+)
+
+// OOMScoreMaxKillable is the maximum score keeping the process killable by the oom killer
+const OOMScoreMaxKillable = -999
+
+// SetOOMScore sets the oom score for the provided pid
+func SetOOMScore(pid, score int) error {
+	path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
+	f, err := os.OpenFile(path, os.O_WRONLY, 0)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	if _, err = f.WriteString(strconv.Itoa(score)); err != nil {
+		if os.IsPermission(err) && system.RunningInUserNS() {
+			return nil
+		}
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/sys/oom_windows.go b/vendor/github.com/containerd/containerd/sys/oom_windows.go
new file mode 100644
index 0000000..a72568b
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/oom_windows.go
@@ -0,0 +1,5 @@
+package sys
+
+func SetOOMScore(pid, score int) error {
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/sys/prctl.go b/vendor/github.com/containerd/containerd/sys/prctl.go
new file mode 100644
index 0000000..aa1a4ad
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/prctl.go
@@ -0,0 +1,41 @@
+// +build linux
+
+// Package sys provides access to the Get Child and Set Child prctl flags.
+// See http://man7.org/linux/man-pages/man2/prctl.2.html
+package sys
+
+import (
+	"unsafe"
+
+	"golang.org/x/sys/unix"
+)
+
+// GetSubreaper returns the subreaper setting for the calling process
+func GetSubreaper() (int, error) {
+	var i uintptr
+	// PR_GET_CHILD_SUBREAPER allows retrieving the current child
+	// subreaper.
+	// Returns the "child subreaper" setting of the caller, in the
+	// location pointed to by (int *) arg2.
+	if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil {
+		return -1, err
+	}
+	return int(i), nil
+}
+
+// SetSubreaper sets the value i as the subreaper setting for the calling process
+func SetSubreaper(i int) error {
+	// PR_SET_CHILD_SUBREAPER allows setting the child subreaper.
+	// If arg2 is nonzero, set the "child subreaper" attribute of the
+	// calling process; if arg2 is zero, unset the attribute.  When a
+	// process is marked as a child subreaper, all of the children
+	// that it creates, and their descendants, will be marked as
+	// having a subreaper.  In effect, a subreaper fulfills the role
+	// of init(1) for its descendant processes.  Upon termination of
+	// a process that is orphaned (i.e., its immediate parent has
+	// already terminated) and marked as having a subreaper, the
+	// nearest still living ancestor subreaper will receive a SIGCHLD
+	// signal and be able to wait(2) on the process to discover its
+	// termination status.
+	return unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0)
+}
diff --git a/vendor/github.com/containerd/containerd/sys/prctl_solaris.go b/vendor/github.com/containerd/containerd/sys/prctl_solaris.go
new file mode 100644
index 0000000..9443f14
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/prctl_solaris.go
@@ -0,0 +1,19 @@
+// +build solaris
+
+package sys
+
+import (
+	"errors"
+)
+
+// Solaris TODO
+
+// GetSubreaper returns the subreaper setting for the calling process
+func GetSubreaper() (int, error) {
+	return 0, errors.New("osutils GetSubreaper not implemented on Solaris")
+}
+
+// SetSubreaper sets the value i as the subreaper setting for the calling process
+func SetSubreaper(i int) error {
+	return errors.New("osutils SetSubreaper not implemented on Solaris")
+}
diff --git a/vendor/github.com/containerd/containerd/sys/proc.go b/vendor/github.com/containerd/containerd/sys/proc.go
new file mode 100644
index 0000000..fbe7b51
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/proc.go
@@ -0,0 +1,64 @@
+// +build linux
+
+package sys
+
+import (
+	"bufio"
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/opencontainers/runc/libcontainer/system"
+)
+
+const nanoSecondsPerSecond = 1e9
+
+var clockTicksPerSecond = uint64(system.GetClockTicks())
+
+// GetSystemCPUUsage returns the host system's cpu usage in
+// nanoseconds. An error is returned if the format of the underlying
+// file does not match.
+//
+// Uses /proc/stat defined by POSIX. Looks for the cpu
+// statistics line and then sums up the first seven fields
+// provided. See `man 5 proc` for details on specific field
+// information.
+func GetSystemCPUUsage() (uint64, error) {
+	var line string
+	f, err := os.Open("/proc/stat")
+	if err != nil {
+		return 0, err
+	}
+	bufReader := bufio.NewReaderSize(nil, 128)
+	defer func() {
+		bufReader.Reset(nil)
+		f.Close()
+	}()
+	bufReader.Reset(f)
+	err = nil
+	for err == nil {
+		line, err = bufReader.ReadString('\n')
+		if err != nil {
+			break
+		}
+		parts := strings.Fields(line)
+		switch parts[0] {
+		case "cpu":
+			if len(parts) < 8 {
+				return 0, fmt.Errorf("bad format of cpu stats")
+			}
+			var totalClockTicks uint64
+			for _, i := range parts[1:8] {
+				v, err := strconv.ParseUint(i, 10, 64)
+				if err != nil {
+					return 0, fmt.Errorf("error parsing cpu stats")
+				}
+				totalClockTicks += v
+			}
+			return (totalClockTicks * nanoSecondsPerSecond) /
+				clockTicksPerSecond, nil
+		}
+	}
+	return 0, fmt.Errorf("bad stats format")
+}
diff --git a/vendor/github.com/containerd/containerd/sys/reaper.go b/vendor/github.com/containerd/containerd/sys/reaper.go
new file mode 100644
index 0000000..bbc5a1e
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/reaper.go
@@ -0,0 +1,51 @@
+// +build !windows
+
+package sys
+
+import "golang.org/x/sys/unix"
+
+// Exit is the wait4 information from an exited process
+type Exit struct {
+	Pid    int
+	Status int
+}
+
+// Reap reaps all child processes for the calling process and returns their
+// exit information
+func Reap(wait bool) (exits []Exit, err error) {
+	var (
+		ws  unix.WaitStatus
+		rus unix.Rusage
+	)
+	flag := unix.WNOHANG
+	if wait {
+		flag = 0
+	}
+	for {
+		pid, err := unix.Wait4(-1, &ws, flag, &rus)
+		if err != nil {
+			if err == unix.ECHILD {
+				return exits, nil
+			}
+			return exits, err
+		}
+		if pid <= 0 {
+			return exits, nil
+		}
+		exits = append(exits, Exit{
+			Pid:    pid,
+			Status: exitStatus(ws),
+		})
+	}
+}
+
+const exitSignalOffset = 128
+
+// exitStatus returns the correct exit status for a process based on if it
+// was signaled or exited cleanly
+func exitStatus(status unix.WaitStatus) int {
+	if status.Signaled() {
+		return exitSignalOffset + int(status.Signal())
+	}
+	return status.ExitStatus()
+}
diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go
new file mode 100644
index 0000000..0d5f049
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/socket_unix.go
@@ -0,0 +1,59 @@
+// +build !windows
+
+package sys
+
+import (
+	"net"
+	"os"
+	"path/filepath"
+
+	"golang.org/x/sys/unix"
+)
+
+// CreateUnixSocket creates a unix socket and returns the listener
+func CreateUnixSocket(path string) (net.Listener, error) {
+	if err := os.MkdirAll(filepath.Dir(path), 0660); err != nil {
+		return nil, err
+	}
+	if err := unix.Unlink(path); err != nil && !os.IsNotExist(err) {
+		return nil, err
+	}
+	return net.Listen("unix", path)
+}
+
+// GetLocalListener returns a listener out of a unix socket.
+func GetLocalListener(path string, uid, gid int) (net.Listener, error) {
+	// Ensure parent directory is created
+	if err := mkdirAs(filepath.Dir(path), uid, gid); err != nil {
+		return nil, err
+	}
+
+	l, err := CreateUnixSocket(path)
+	if err != nil {
+		return l, err
+	}
+
+	if err := os.Chmod(path, 0660); err != nil {
+		l.Close()
+		return nil, err
+	}
+
+	if err := os.Chown(path, uid, gid); err != nil {
+		l.Close()
+		return nil, err
+	}
+
+	return l, nil
+}
+
+func mkdirAs(path string, uid, gid int) error {
+	if _, err := os.Stat(path); err == nil || !os.IsNotExist(err) {
+		return err
+	}
+
+	if err := os.Mkdir(path, 0770); err != nil {
+		return err
+	}
+
+	return os.Chown(path, uid, gid)
+}
diff --git a/vendor/github.com/containerd/containerd/sys/socket_windows.go b/vendor/github.com/containerd/containerd/sys/socket_windows.go
new file mode 100644
index 0000000..de25c08
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/socket_windows.go
@@ -0,0 +1,16 @@
+// +build windows
+
+package sys
+
+import (
+	"net"
+
+	"github.com/Microsoft/go-winio"
+)
+
+// GetLocalListener returns a net.Listener out of a named pipe.
+// `path` must be of the form of `\\.\pipe\<pipename>`
+// (see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365150)
+func GetLocalListener(path string, uid, gid int) (net.Listener, error) {
+	return winio.ListenPipe(path, nil)
+}
diff --git a/vendor/github.com/containerd/containerd/sys/stat_bsd.go b/vendor/github.com/containerd/containerd/sys/stat_bsd.go
new file mode 100644
index 0000000..13db2b3
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/stat_bsd.go
@@ -0,0 +1,19 @@
+// +build darwin freebsd
+
+package sys
+
+import (
+	"syscall"
+)
+
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Atimespec
+}
+
+func StatCtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Ctimespec
+}
+
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Mtimespec
+}
diff --git a/vendor/github.com/containerd/containerd/sys/stat_unix.go b/vendor/github.com/containerd/containerd/sys/stat_unix.go
new file mode 100644
index 0000000..da13ed2
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/sys/stat_unix.go
@@ -0,0 +1,19 @@
+// +build linux solaris
+
+package sys
+
+import (
+	"syscall"
+)
+
+func StatAtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Atim
+}
+
+func StatCtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Ctim
+}
+
+func StatMtime(st *syscall.Stat_t) syscall.Timespec {
+	return st.Mtim
+}
diff --git a/vendor/github.com/containerd/containerd/task.go b/vendor/github.com/containerd/containerd/task.go
new file mode 100644
index 0000000..6b9af1d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/task.go
@@ -0,0 +1,603 @@
+package containerd
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	goruntime "runtime"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+
+	"github.com/containerd/containerd/api/services/tasks/v1"
+	"github.com/containerd/containerd/api/types"
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/diff"
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/log"
+	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/plugin"
+	"github.com/containerd/containerd/rootfs"
+	"github.com/containerd/typeurl"
+	google_protobuf "github.com/gogo/protobuf/types"
+	digest "github.com/opencontainers/go-digest"
+	"github.com/opencontainers/image-spec/specs-go/v1"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+	"github.com/pkg/errors"
+)
+
+// UnknownExitStatus is returned when containerd is unable to
+// determine the exit status of a process. This can happen if the process never starts
+// or if an error was encountered when obtaining the exit status, it is set to 255.
+const UnknownExitStatus = 255
+
+const (
+	checkpointDateFormat = "01-02-2006-15:04:05"
+	checkpointNameFormat = "containerd.io/checkpoint/%s:%s"
+)
+
+// Status returns process status and exit information
+type Status struct {
+	// Status of the process
+	Status ProcessStatus
+	// ExitStatus returned by the process
+	ExitStatus uint32
+	// ExitedTime is the time at which the process died
+	ExitTime time.Time
+}
+
+type ProcessInfo struct {
+	// Pid is the process ID
+	Pid uint32
+	// Info includes additional process information
+	// Info varies by platform
+	Info *google_protobuf.Any
+}
+
+// ProcessStatus returns a human readable status for the Process representing its current status
+type ProcessStatus string
+
+const (
+	// Running indicates the process is currently executing
+	Running ProcessStatus = "running"
+	// Created indicates the process has been created within containerd but the
+	// user's defined process has not started
+	Created ProcessStatus = "created"
+	// Stopped indicates that the process has run and exited
+	Stopped ProcessStatus = "stopped"
+	// Paused indicates that the process is currently paused
+	Paused ProcessStatus = "paused"
+	// Pausing indicates that the process is currently switching from a
+	// running state into a paused state
+	Pausing ProcessStatus = "pausing"
+	// Unknown indicates that we could not determine the status from the runtime
+	Unknown ProcessStatus = "unknown"
+)
+
+// IOCloseInfo allows specific io pipes to be closed on a process
+type IOCloseInfo struct {
+	Stdin bool
+}
+
+// IOCloserOpts allows the caller to set specific pipes as closed on a process
+type IOCloserOpts func(*IOCloseInfo)
+
+// WithStdinCloser closes the stdin of a process
+func WithStdinCloser(r *IOCloseInfo) {
+	r.Stdin = true
+}
+
+// CheckpointTaskInfo allows specific checkpoint information to be set for the task
+type CheckpointTaskInfo struct {
+	Name string
+	// ParentCheckpoint is the digest of a parent checkpoint
+	ParentCheckpoint digest.Digest
+	// Options hold runtime specific settings for checkpointing a task
+	Options interface{}
+}
+
+// CheckpointTaskOpts allows the caller to set checkpoint options
+type CheckpointTaskOpts func(*CheckpointTaskInfo) error
+
+// TaskInfo sets options for task creation
+type TaskInfo struct {
+	// Checkpoint is the Descriptor for an existing checkpoint that can be used
+	// to restore a task's runtime and memory state
+	Checkpoint *types.Descriptor
+	// RootFS is a list of mounts to use as the task's root filesystem
+	RootFS []mount.Mount
+	// Options hold runtime specific settings for task creation
+	Options interface{}
+}
+
+// Task is the executable object within containerd
+type Task interface {
+	Process
+
+	// Pause suspends the execution of the task
+	Pause(context.Context) error
+	// Resume the execution of the task
+	Resume(context.Context) error
+	// Exec creates a new process inside the task
+	Exec(context.Context, string, *specs.Process, IOCreation) (Process, error)
+	// Pids returns a list of system specific process ids inside the task
+	Pids(context.Context) ([]ProcessInfo, error)
+	// Checkpoint serializes the runtime and memory information of a task into an
+	// OCI Index that can be push and pulled from a remote resource.
+	//
+	// Additional software like CRIU may be required to checkpoint and restore tasks
+	Checkpoint(context.Context, ...CheckpointTaskOpts) (Image, error)
+	// Update modifies executing tasks with updated settings
+	Update(context.Context, ...UpdateTaskOpts) error
+	// LoadProcess loads a previously created exec'd process
+	LoadProcess(context.Context, string, IOAttach) (Process, error)
+	// Metrics returns task metrics for runtime specific metrics
+	//
+	// The metric types are generic to containerd and change depending on the runtime
+	// For the built in Linux runtime, github.com/containerd/cgroups.Metrics
+	// are returned in protobuf format
+	Metrics(context.Context) (*types.Metric, error)
+}
+
+var _ = (Task)(&task{})
+
+type task struct {
+	client *Client
+
+	io  IO
+	id  string
+	pid uint32
+
+	mu sync.Mutex
+}
+
+// Pid returns the pid or process id for the task
+func (t *task) Pid() uint32 {
+	return t.pid
+}
+
+func (t *task) Start(ctx context.Context) error {
+	r, err := t.client.TaskService().Start(ctx, &tasks.StartRequest{
+		ContainerID: t.id,
+	})
+	if err != nil {
+		t.io.Close()
+		return errdefs.FromGRPC(err)
+	}
+	t.pid = r.Pid
+	return nil
+}
+
+func (t *task) Kill(ctx context.Context, s syscall.Signal, opts ...KillOpts) error {
+	var i KillInfo
+	for _, o := range opts {
+		if err := o(ctx, t, &i); err != nil {
+			return err
+		}
+	}
+	_, err := t.client.TaskService().Kill(ctx, &tasks.KillRequest{
+		Signal:      uint32(s),
+		ContainerID: t.id,
+		All:         i.All,
+	})
+	if err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	return nil
+}
+
+func (t *task) Pause(ctx context.Context) error {
+	_, err := t.client.TaskService().Pause(ctx, &tasks.PauseTaskRequest{
+		ContainerID: t.id,
+	})
+	return errdefs.FromGRPC(err)
+}
+
+func (t *task) Resume(ctx context.Context) error {
+	_, err := t.client.TaskService().Resume(ctx, &tasks.ResumeTaskRequest{
+		ContainerID: t.id,
+	})
+	return errdefs.FromGRPC(err)
+}
+
+func (t *task) Status(ctx context.Context) (Status, error) {
+	r, err := t.client.TaskService().Get(ctx, &tasks.GetRequest{
+		ContainerID: t.id,
+	})
+	if err != nil {
+		return Status{}, errdefs.FromGRPC(err)
+	}
+	return Status{
+		Status:     ProcessStatus(strings.ToLower(r.Process.Status.String())),
+		ExitStatus: r.Process.ExitStatus,
+		ExitTime:   r.Process.ExitedAt,
+	}, nil
+}
+
+func (t *task) Wait(ctx context.Context) (<-chan ExitStatus, error) {
+	c := make(chan ExitStatus, 1)
+	go func() {
+		defer close(c)
+		r, err := t.client.TaskService().Wait(ctx, &tasks.WaitRequest{
+			ContainerID: t.id,
+		})
+		if err != nil {
+			c <- ExitStatus{
+				code: UnknownExitStatus,
+				err:  err,
+			}
+			return
+		}
+		c <- ExitStatus{
+			code:     r.ExitStatus,
+			exitedAt: r.ExitedAt,
+		}
+	}()
+	return c, nil
+}
+
+// Delete deletes the task and its runtime state
+// it returns the exit status of the task and any errors that were encountered
+// during cleanup
+func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitStatus, error) {
+	for _, o := range opts {
+		if err := o(ctx, t); err != nil {
+			return nil, err
+		}
+	}
+	status, err := t.Status(ctx)
+	if err != nil && errdefs.IsNotFound(err) {
+		return nil, err
+	}
+	switch status.Status {
+	case Stopped, Unknown, "":
+	case Created:
+		if t.client.runtime == fmt.Sprintf("%s.%s", plugin.RuntimePlugin, "windows") {
+			// On windows Created is akin to Stopped
+			break
+		}
+		fallthrough
+	default:
+		return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "task must be stopped before deletion: %s", status.Status)
+	}
+	if t.io != nil {
+		t.io.Cancel()
+		t.io.Wait()
+		t.io.Close()
+	}
+	r, err := t.client.TaskService().Delete(ctx, &tasks.DeleteTaskRequest{
+		ContainerID: t.id,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil
+}
+
+func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate IOCreation) (Process, error) {
+	if id == "" {
+		return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "exec id must not be empty")
+	}
+	i, err := ioCreate(id)
+	if err != nil {
+		return nil, err
+	}
+	any, err := typeurl.MarshalAny(spec)
+	if err != nil {
+		return nil, err
+	}
+	cfg := i.Config()
+	request := &tasks.ExecProcessRequest{
+		ContainerID: t.id,
+		ExecID:      id,
+		Terminal:    cfg.Terminal,
+		Stdin:       cfg.Stdin,
+		Stdout:      cfg.Stdout,
+		Stderr:      cfg.Stderr,
+		Spec:        any,
+	}
+	if _, err := t.client.TaskService().Exec(ctx, request); err != nil {
+		i.Cancel()
+		i.Wait()
+		i.Close()
+		return nil, errdefs.FromGRPC(err)
+	}
+	return &process{
+		id:   id,
+		task: t,
+		io:   i,
+	}, nil
+}
+
+func (t *task) Pids(ctx context.Context) ([]ProcessInfo, error) {
+	response, err := t.client.TaskService().ListPids(ctx, &tasks.ListPidsRequest{
+		ContainerID: t.id,
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+	var processList []ProcessInfo
+	for _, p := range response.Processes {
+		processList = append(processList, ProcessInfo{
+			Pid:  p.Pid,
+			Info: p.Info,
+		})
+	}
+	return processList, nil
+}
+
+func (t *task) CloseIO(ctx context.Context, opts ...IOCloserOpts) error {
+	r := &tasks.CloseIORequest{
+		ContainerID: t.id,
+	}
+	var i IOCloseInfo
+	for _, o := range opts {
+		o(&i)
+	}
+	r.Stdin = i.Stdin
+	_, err := t.client.TaskService().CloseIO(ctx, r)
+	return errdefs.FromGRPC(err)
+}
+
+func (t *task) IO() IO {
+	return t.io
+}
+
+func (t *task) Resize(ctx context.Context, w, h uint32) error {
+	_, err := t.client.TaskService().ResizePty(ctx, &tasks.ResizePtyRequest{
+		ContainerID: t.id,
+		Width:       w,
+		Height:      h,
+	})
+	return errdefs.FromGRPC(err)
+}
+
+func (t *task) Checkpoint(ctx context.Context, opts ...CheckpointTaskOpts) (Image, error) {
+	request := &tasks.CheckpointTaskRequest{
+		ContainerID: t.id,
+	}
+	var i CheckpointTaskInfo
+	for _, o := range opts {
+		if err := o(&i); err != nil {
+			return nil, err
+		}
+	}
+	// set a default name
+	if i.Name == "" {
+		i.Name = fmt.Sprintf(checkpointNameFormat, t.id, time.Now().Format(checkpointDateFormat))
+	}
+	request.ParentCheckpoint = i.ParentCheckpoint
+	if i.Options != nil {
+		any, err := typeurl.MarshalAny(i.Options)
+		if err != nil {
+			return nil, err
+		}
+		request.Options = any
+	}
+	// make sure we pause it and resume after all other filesystem operations are completed
+	if err := t.Pause(ctx); err != nil {
+		return nil, err
+	}
+	defer t.Resume(ctx)
+	cr, err := t.client.ContainerService().Get(ctx, t.id)
+	if err != nil {
+		return nil, err
+	}
+	index := v1.Index{
+		Annotations: make(map[string]string),
+	}
+	// make sure we clear the gc root labels regardless of success
+	var clearRoots []ocispec.Descriptor
+	defer func() {
+		for _, r := range append(index.Manifests, clearRoots...) {
+			if err := clearRootGCLabel(ctx, t.client, r); err != nil {
+				log.G(ctx).WithError(err).WithField("dgst", r.Digest).Warnf("failed to remove root marker")
+			}
+		}
+	}()
+	if err := t.checkpointTask(ctx, &index, request); err != nil {
+		return nil, err
+	}
+	if cr.Image != "" {
+		if err := t.checkpointImage(ctx, &index, cr.Image); err != nil {
+			return nil, err
+		}
+		index.Annotations["image.name"] = cr.Image
+	}
+	if cr.SnapshotKey != "" {
+		if err := t.checkpointRWSnapshot(ctx, &index, cr.Snapshotter, cr.SnapshotKey); err != nil {
+			return nil, err
+		}
+	}
+	desc, err := t.writeIndex(ctx, &index)
+	if err != nil {
+		return nil, err
+	}
+	clearRoots = append(clearRoots, desc)
+	im := images.Image{
+		Name:   i.Name,
+		Target: desc,
+		Labels: map[string]string{
+			"containerd.io/checkpoint": "true",
+		},
+	}
+	if im, err = t.client.ImageService().Create(ctx, im); err != nil {
+		return nil, err
+	}
+	return &image{
+		client: t.client,
+		i:      im,
+	}, nil
+}
+
+// UpdateTaskInfo allows specific settings to be updated on a task
+type UpdateTaskInfo struct {
+	// Resources updates a tasks resource constraints
+	Resources interface{}
+}
+
+// UpdateTaskOpts allows a caller to update task settings
+type UpdateTaskOpts func(context.Context, *Client, *UpdateTaskInfo) error
+
+func (t *task) Update(ctx context.Context, opts ...UpdateTaskOpts) error {
+	request := &tasks.UpdateTaskRequest{
+		ContainerID: t.id,
+	}
+	var i UpdateTaskInfo
+	for _, o := range opts {
+		if err := o(ctx, t.client, &i); err != nil {
+			return err
+		}
+	}
+	if i.Resources != nil {
+		any, err := typeurl.MarshalAny(i.Resources)
+		if err != nil {
+			return err
+		}
+		request.Resources = any
+	}
+	_, err := t.client.TaskService().Update(ctx, request)
+	return errdefs.FromGRPC(err)
+}
+
+func (t *task) LoadProcess(ctx context.Context, id string, ioAttach IOAttach) (Process, error) {
+	response, err := t.client.TaskService().Get(ctx, &tasks.GetRequest{
+		ContainerID: t.id,
+		ExecID:      id,
+	})
+	if err != nil {
+		err = errdefs.FromGRPC(err)
+		if errdefs.IsNotFound(err) {
+			return nil, errors.Wrapf(err, "no running process found")
+		}
+		return nil, err
+	}
+	var i IO
+	if ioAttach != nil {
+		if i, err = attachExistingIO(response, ioAttach); err != nil {
+			return nil, err
+		}
+	}
+	return &process{
+		id:   id,
+		task: t,
+		io:   i,
+	}, nil
+}
+
+func (t *task) Metrics(ctx context.Context) (*types.Metric, error) {
+	response, err := t.client.TaskService().Metrics(ctx, &tasks.MetricsRequest{
+		Filters: []string{
+			"id==" + t.id,
+		},
+	})
+	if err != nil {
+		return nil, errdefs.FromGRPC(err)
+	}
+
+	if response.Metrics == nil {
+		_, err := t.Status(ctx)
+		if err != nil && errdefs.IsNotFound(err) {
+			return nil, err
+		}
+		return nil, errors.New("no metrics received")
+	}
+
+	return response.Metrics[0], nil
+}
+
+func (t *task) checkpointTask(ctx context.Context, index *v1.Index, request *tasks.CheckpointTaskRequest) error {
+	response, err := t.client.TaskService().Checkpoint(ctx, request)
+	if err != nil {
+		return errdefs.FromGRPC(err)
+	}
+	// add the checkpoint descriptors to the index
+	for _, d := range response.Descriptors {
+		index.Manifests = append(index.Manifests, v1.Descriptor{
+			MediaType: d.MediaType,
+			Size:      d.Size_,
+			Digest:    d.Digest,
+			Platform: &v1.Platform{
+				OS:           goruntime.GOOS,
+				Architecture: goruntime.GOARCH,
+			},
+		})
+	}
+	return nil
+}
+
+func (t *task) checkpointRWSnapshot(ctx context.Context, index *v1.Index, snapshotterName string, id string) error {
+	opts := []diff.Opt{
+		diff.WithReference(fmt.Sprintf("checkpoint-rw-%s", id)),
+		diff.WithLabels(map[string]string{
+			"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
+		}),
+	}
+	rw, err := rootfs.Diff(ctx, id, t.client.SnapshotService(snapshotterName), t.client.DiffService(), opts...)
+	if err != nil {
+		return err
+	}
+	rw.Platform = &v1.Platform{
+		OS:           goruntime.GOOS,
+		Architecture: goruntime.GOARCH,
+	}
+	index.Manifests = append(index.Manifests, rw)
+	return nil
+}
+
+func (t *task) checkpointImage(ctx context.Context, index *v1.Index, image string) error {
+	if image == "" {
+		return fmt.Errorf("cannot checkpoint image with empty name")
+	}
+	ir, err := t.client.ImageService().Get(ctx, image)
+	if err != nil {
+		return err
+	}
+	index.Manifests = append(index.Manifests, ir.Target)
+	return nil
+}
+
+func (t *task) writeIndex(ctx context.Context, index *v1.Index) (d v1.Descriptor, err error) {
+	labels := map[string]string{
+		"containerd.io/gc.root": time.Now().UTC().Format(time.RFC3339),
+	}
+	for i, m := range index.Manifests {
+		labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = m.Digest.String()
+	}
+	buf := bytes.NewBuffer(nil)
+	if err := json.NewEncoder(buf).Encode(index); err != nil {
+		return v1.Descriptor{}, err
+	}
+	return writeContent(ctx, t.client.ContentStore(), v1.MediaTypeImageIndex, t.id, buf, content.WithLabels(labels))
+}
+
+func writeContent(ctx context.Context, store content.Store, mediaType, ref string, r io.Reader, opts ...content.Opt) (d v1.Descriptor, err error) {
+	writer, err := store.Writer(ctx, ref, 0, "")
+	if err != nil {
+		return d, err
+	}
+	defer writer.Close()
+	size, err := io.Copy(writer, r)
+	if err != nil {
+		return d, err
+	}
+	if err := writer.Commit(ctx, size, "", opts...); err != nil {
+		return d, err
+	}
+	return v1.Descriptor{
+		MediaType: mediaType,
+		Digest:    writer.Digest(),
+		Size:      size,
+	}, nil
+}
+
+func clearRootGCLabel(ctx context.Context, client *Client, desc ocispec.Descriptor) error {
+	info := content.Info{Digest: desc.Digest}
+	_, err := client.ContentStore().Update(ctx, info, "labels.containerd.io/gc.root")
+	return err
+}
diff --git a/vendor/github.com/containerd/containerd/task_opts.go b/vendor/github.com/containerd/containerd/task_opts.go
new file mode 100644
index 0000000..261ccba
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/task_opts.go
@@ -0,0 +1,77 @@
+package containerd
+
+import (
+	"context"
+	"syscall"
+
+	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/containerd/linux/runcopts"
+	"github.com/containerd/containerd/mount"
+)
+
+// NewTaskOpts allows the caller to set options on a new task
+type NewTaskOpts func(context.Context, *Client, *TaskInfo) error
+
+// WithRootFS allows a task to be created without a snapshot being allocated to its container
+func WithRootFS(mounts []mount.Mount) NewTaskOpts {
+	return func(ctx context.Context, c *Client, ti *TaskInfo) error {
+		ti.RootFS = mounts
+		return nil
+	}
+}
+
+// WithExit causes the task to exit after a successful checkpoint
+func WithExit(r *CheckpointTaskInfo) error {
+	r.Options = &runcopts.CheckpointOptions{
+		Exit: true,
+	}
+	return nil
+}
+
+// WithCheckpointName sets the image name for the checkpoint
+func WithCheckpointName(name string) CheckpointTaskOpts {
+	return func(r *CheckpointTaskInfo) error {
+		r.Name = name
+		return nil
+	}
+}
+
+// ProcessDeleteOpts allows the caller to set options for the deletion of a task
+type ProcessDeleteOpts func(context.Context, Process) error
+
+// WithProcessKill will forcefully kill and delete a process
+func WithProcessKill(ctx context.Context, p Process) error {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+	// ignore errors to wait and kill as we are forcefully killing
+	// the process and don't care about the exit status
+	s, err := p.Wait(ctx)
+	if err != nil {
+		return err
+	}
+	if err := p.Kill(ctx, syscall.SIGKILL, WithKillAll); err != nil {
+		if errdefs.IsFailedPrecondition(err) || errdefs.IsNotFound(err) {
+			return nil
+		}
+		return err
+	}
+	// wait for the process to fully stop before letting the rest of the deletion complete
+	<-s
+	return nil
+}
+
+// KillInfo contains information on how to process a Kill action
+type KillInfo struct {
+	// All kills all processes inside the task
+	// only valid on tasks, ignored on processes
+	All bool
+}
+
+// KillOpts allows options to be set for the killing of a process
+type KillOpts func(context.Context, Process, *KillInfo) error
+
+// WithKillAll kills all processes for a task
+func WithKillAll(ctx context.Context, p Process, i *KillInfo) error {
+	i.All = true
+	return nil
+}
diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf
old mode 100755
new mode 100644
index 734f696..1255a4e
--- a/vendor/github.com/containerd/containerd/vendor.conf
+++ b/vendor/github.com/containerd/containerd/vendor.conf
@@ -1,33 +1,43 @@
-github.com/sirupsen/logrus v1.0.1
-github.com/cloudfoundry/gosigar 3ed7c74352dae6dc00bdc8c74045375352e3ec05
-github.com/urfave/cli 8ba6f23b6e36d03666a14bd9421f5e3efcb59aca
-github.com/coreos/go-systemd 7b2428fec40033549c68f54e26e89e7ca9a9ce31
-github.com/cyberdelia/go-metrics-graphite 7e54b5c2aa6eaff4286c44129c3def899dff528c
-github.com/docker/docker f577caff19d486d8d01443507d891cb1b0891cdc
-github.com/docker/go-units 5d2041e26a699eaca682e2ea41c8f891e1060444
-github.com/godbus/dbus e2cf28118e66a6a63db46cf6088a35d2054d3bb0
-github.com/golang/glog 23def4e6c14b4da8ac2ed8007337bc5eb5007998
-github.com/golang/protobuf 8ee79997227bf9b34611aee7946ae64735e6fd93
-github.com/opencontainers/runc d40db12e72a40109dfcf28539f5ee0930d2f0277
+github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
+github.com/containerd/go-runc ed1cbe1fc31f5fb2359d3a54b6330d1a097858b7
+github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e
+github.com/containerd/cgroups f7dd103d3e4e696aa67152f6b4ddd1779a3455a9
+github.com/containerd/typeurl f6943554a7e7e88b3c14aad190bf05932da84788
+github.com/docker/go-metrics 8fd5772bf1584597834c6f7961a530f06cbfbb87
+github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
+github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f
+github.com/prometheus/client_golang v0.8.0
+github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6
+github.com/prometheus/common 195bde7883f7c39ea62b0d92ab7359b5327065cb
+github.com/prometheus/procfs fcdb11ccb4389efb1b210b7ffb623ab71c5fdd60
+github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9
+github.com/matttproud/golang_protobuf_extensions v1.0.0
+github.com/docker/go-units v0.3.1
+github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8
+github.com/golang/protobuf 5a0f697c9ed9d68fef0116532c6e05cfeae00e55
 github.com/opencontainers/runtime-spec v1.0.0
-github.com/rcrowley/go-metrics eeba7bd0dd01ace6e690fa833b3f22aaec29af43
-github.com/satori/go.uuid f9ab0dce87d815821e221626b772e3475a0d2749
-github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
-github.com/vishvananda/netlink adb0f53af689dd38f1443eba79489feaacf0b22e
-github.com/Azure/go-ansiterm 70b2c90b260171e829f1ebd7c17f600c11858dbe
-golang.org/x/net 991d3e32f76f19ee6d9caadb3a22eae8d23315f7 https://github.com/golang/net.git
-golang.org/x/sys 0e0164865330d5cf1c00247be08330bf96e2f87c https://github.com/golang/sys
-google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go.git
-github.com/seccomp/libseccomp-golang 1b506fc7c24eec5a3693cdcbed40d9c226cfc6a1
-github.com/tonistiigi/fifo b45391ebcd3d282404092c04a2b015b37df12383
-github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9
-
-github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3
-github.com/go-check/check a625211d932a2a643d0d17352095f03fb7774663 https://github.com/cpuguy83/check.git
-
-github.com/containerd/console a3863895279f5104533fd999c1babf80faffd98c
-github.com/containerd/go-runc 5fe4d8cb7fdc0fae5f5a7f4f1d65a565032401b2
-
-# dependencies of docker/pkg/listeners
-github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
-github.com/Microsoft/go-winio v0.3.2
+github.com/opencontainers/runc 0351df1c5a66838d0c392b4ac4cf9450de844e2d
+github.com/sirupsen/logrus v1.0.0
+github.com/containerd/btrfs cc52c4dea2ce11a44e6639e561bb5c2af9ada9e3
+github.com/stretchr/testify v1.1.4
+github.com/davecgh/go-spew v1.1.0
+github.com/pmezard/go-difflib v1.0.0
+github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6
+github.com/urfave/cli 8ba6f23b6e36d03666a14bd9421f5e3efcb59aca
+golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
+google.golang.org/grpc v1.3.0
+github.com/pkg/errors v0.8.0
+github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
+golang.org/x/sys 314a259e304ff91bd6985da2a7149bbf91237993 https://github.com/golang/sys
+github.com/opencontainers/image-spec v1.0.0
+github.com/containerd/continuity cf279e6ac893682272b4479d4c67fd3abf878b4e
+golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
+github.com/BurntSushi/toml v0.2.0-21-g9906417
+github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
+github.com/Microsoft/go-winio v0.4.4
+github.com/Microsoft/hcsshim v0.6.3
+github.com/Microsoft/opengcs v0.3.2
+github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
+google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
+golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
+github.com/dmcgowan/go-tar 2e2c51242e8993c50445dab7c03c8e7febddd0cf
diff --git a/vendor/github.com/containerd/containerd/windows/hcsshimtypes/doc.go b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/doc.go
new file mode 100644
index 0000000..1712b1a
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/doc.go
@@ -0,0 +1,2 @@
+// hcsshimtypes holds the windows runtime specific types
+package hcsshimtypes
diff --git a/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.pb.go b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.pb.go
new file mode 100644
index 0000000..77c344d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.pb.go
@@ -0,0 +1,719 @@
+// Code generated by protoc-gen-gogo.
+// source: github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto
+// DO NOT EDIT!
+
+/*
+	Package hcsshimtypes is a generated protocol buffer package.
+
+	It is generated from these files:
+		github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto
+
+	It has these top-level messages:
+		CreateOptions
+		ProcessDetails
+*/
+package hcsshimtypes
+
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import _ "github.com/gogo/protobuf/gogoproto"
+import _ "github.com/gogo/protobuf/types"
+import _ "github.com/gogo/protobuf/types"
+
+import time "time"
+
+import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
+
+type CreateOptions struct {
+	TerminateDuration time.Duration `protobuf:"bytes,1,opt,name=terminate_duration,json=terminateDuration,stdduration" json:"terminate_duration"`
+}
+
+func (m *CreateOptions) Reset()                    { *m = CreateOptions{} }
+func (*CreateOptions) ProtoMessage()               {}
+func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorHcsshim, []int{0} }
+
+// ProcessDetails contains additional information about a process
+// ProcessDetails is made of the same fields as found in hcsshim.ProcessListItem
+type ProcessDetails struct {
+	ImageName                    string    `protobuf:"bytes,1,opt,name=image_name,json=imageName,proto3" json:"image_name,omitempty"`
+	CreatedAt                    time.Time `protobuf:"bytes,2,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
+	KernelTime_100Ns             uint64    `protobuf:"varint,3,opt,name=kernel_time_100_ns,json=kernelTime100Ns,proto3" json:"kernel_time_100_ns,omitempty"`
+	MemoryCommitBytes            uint64    `protobuf:"varint,4,opt,name=memory_commit_bytes,json=memoryCommitBytes,proto3" json:"memory_commit_bytes,omitempty"`
+	MemoryWorkingSetPrivateBytes uint64    `protobuf:"varint,5,opt,name=memory_working_set_private_bytes,json=memoryWorkingSetPrivateBytes,proto3" json:"memory_working_set_private_bytes,omitempty"`
+	MemoryWorkingSetSharedBytes  uint64    `protobuf:"varint,6,opt,name=memory_working_set_shared_bytes,json=memoryWorkingSetSharedBytes,proto3" json:"memory_working_set_shared_bytes,omitempty"`
+	ProcessID                    uint32    `protobuf:"varint,7,opt,name=process_id,json=processId,proto3" json:"process_id,omitempty"`
+	UserTime_100Ns               uint64    `protobuf:"varint,8,opt,name=user_time_100_ns,json=userTime100Ns,proto3" json:"user_time_100_ns,omitempty"`
+}
+
+func (m *ProcessDetails) Reset()                    { *m = ProcessDetails{} }
+func (*ProcessDetails) ProtoMessage()               {}
+func (*ProcessDetails) Descriptor() ([]byte, []int) { return fileDescriptorHcsshim, []int{1} }
+
+func init() {
+	proto.RegisterType((*CreateOptions)(nil), "containerd.windows.hcsshim.CreateOptions")
+	proto.RegisterType((*ProcessDetails)(nil), "containerd.windows.hcsshim.ProcessDetails")
+}
+func (m *CreateOptions) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	dAtA[i] = 0xa
+	i++
+	i = encodeVarintHcsshim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdDuration(m.TerminateDuration)))
+	n1, err := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.TerminateDuration, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n1
+	return i, nil
+}
+
+func (m *ProcessDetails) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ProcessDetails) MarshalTo(dAtA []byte) (int, error) {
+	var i int
+	_ = i
+	var l int
+	_ = l
+	if len(m.ImageName) > 0 {
+		dAtA[i] = 0xa
+		i++
+		i = encodeVarintHcsshim(dAtA, i, uint64(len(m.ImageName)))
+		i += copy(dAtA[i:], m.ImageName)
+	}
+	dAtA[i] = 0x12
+	i++
+	i = encodeVarintHcsshim(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)))
+	n2, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.CreatedAt, dAtA[i:])
+	if err != nil {
+		return 0, err
+	}
+	i += n2
+	if m.KernelTime_100Ns != 0 {
+		dAtA[i] = 0x18
+		i++
+		i = encodeVarintHcsshim(dAtA, i, uint64(m.KernelTime_100Ns))
+	}
+	if m.MemoryCommitBytes != 0 {
+		dAtA[i] = 0x20
+		i++
+		i = encodeVarintHcsshim(dAtA, i, uint64(m.MemoryCommitBytes))
+	}
+	if m.MemoryWorkingSetPrivateBytes != 0 {
+		dAtA[i] = 0x28
+		i++
+		i = encodeVarintHcsshim(dAtA, i, uint64(m.MemoryWorkingSetPrivateBytes))
+	}
+	if m.MemoryWorkingSetSharedBytes != 0 {
+		dAtA[i] = 0x30
+		i++
+		i = encodeVarintHcsshim(dAtA, i, uint64(m.MemoryWorkingSetSharedBytes))
+	}
+	if m.ProcessID != 0 {
+		dAtA[i] = 0x38
+		i++
+		i = encodeVarintHcsshim(dAtA, i, uint64(m.ProcessID))
+	}
+	if m.UserTime_100Ns != 0 {
+		dAtA[i] = 0x40
+		i++
+		i = encodeVarintHcsshim(dAtA, i, uint64(m.UserTime_100Ns))
+	}
+	return i, nil
+}
+
+func encodeFixed64Hcsshim(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
+	return offset + 8
+}
+func encodeFixed32Hcsshim(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	return offset + 4
+}
+func encodeVarintHcsshim(dAtA []byte, offset int, v uint64) int {
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return offset + 1
+}
+func (m *CreateOptions) Size() (n int) {
+	var l int
+	_ = l
+	l = github_com_gogo_protobuf_types.SizeOfStdDuration(m.TerminateDuration)
+	n += 1 + l + sovHcsshim(uint64(l))
+	return n
+}
+
+func (m *ProcessDetails) Size() (n int) {
+	var l int
+	_ = l
+	l = len(m.ImageName)
+	if l > 0 {
+		n += 1 + l + sovHcsshim(uint64(l))
+	}
+	l = github_com_gogo_protobuf_types.SizeOfStdTime(m.CreatedAt)
+	n += 1 + l + sovHcsshim(uint64(l))
+	if m.KernelTime_100Ns != 0 {
+		n += 1 + sovHcsshim(uint64(m.KernelTime_100Ns))
+	}
+	if m.MemoryCommitBytes != 0 {
+		n += 1 + sovHcsshim(uint64(m.MemoryCommitBytes))
+	}
+	if m.MemoryWorkingSetPrivateBytes != 0 {
+		n += 1 + sovHcsshim(uint64(m.MemoryWorkingSetPrivateBytes))
+	}
+	if m.MemoryWorkingSetSharedBytes != 0 {
+		n += 1 + sovHcsshim(uint64(m.MemoryWorkingSetSharedBytes))
+	}
+	if m.ProcessID != 0 {
+		n += 1 + sovHcsshim(uint64(m.ProcessID))
+	}
+	if m.UserTime_100Ns != 0 {
+		n += 1 + sovHcsshim(uint64(m.UserTime_100Ns))
+	}
+	return n
+}
+
+func sovHcsshim(x uint64) (n int) {
+	for {
+		n++
+		x >>= 7
+		if x == 0 {
+			break
+		}
+	}
+	return n
+}
+func sozHcsshim(x uint64) (n int) {
+	return sovHcsshim(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (this *CreateOptions) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&CreateOptions{`,
+		`TerminateDuration:` + strings.Replace(strings.Replace(this.TerminateDuration.String(), "Duration", "google_protobuf1.Duration", 1), `&`, ``, 1) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func (this *ProcessDetails) String() string {
+	if this == nil {
+		return "nil"
+	}
+	s := strings.Join([]string{`&ProcessDetails{`,
+		`ImageName:` + fmt.Sprintf("%v", this.ImageName) + `,`,
+		`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
+		`KernelTime_100Ns:` + fmt.Sprintf("%v", this.KernelTime_100Ns) + `,`,
+		`MemoryCommitBytes:` + fmt.Sprintf("%v", this.MemoryCommitBytes) + `,`,
+		`MemoryWorkingSetPrivateBytes:` + fmt.Sprintf("%v", this.MemoryWorkingSetPrivateBytes) + `,`,
+		`MemoryWorkingSetSharedBytes:` + fmt.Sprintf("%v", this.MemoryWorkingSetSharedBytes) + `,`,
+		`ProcessID:` + fmt.Sprintf("%v", this.ProcessID) + `,`,
+		`UserTime_100Ns:` + fmt.Sprintf("%v", this.UserTime_100Ns) + `,`,
+		`}`,
+	}, "")
+	return s
+}
+func valueToStringHcsshim(v interface{}) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("*%v", pv)
+}
+func (m *CreateOptions) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowHcsshim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CreateOptions: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CreateOptions: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TerminateDuration", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowHcsshim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthHcsshim
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdDurationUnmarshal(&m.TerminateDuration, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipHcsshim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthHcsshim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ProcessDetails) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowHcsshim
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ProcessDetails: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ProcessDetails: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ImageName", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowHcsshim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthHcsshim
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ImageName = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowHcsshim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthHcsshim
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.CreatedAt, dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KernelTime_100Ns", wireType)
+			}
+			m.KernelTime_100Ns = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowHcsshim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.KernelTime_100Ns |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemoryCommitBytes", wireType)
+			}
+			m.MemoryCommitBytes = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowHcsshim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemoryCommitBytes |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemoryWorkingSetPrivateBytes", wireType)
+			}
+			m.MemoryWorkingSetPrivateBytes = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowHcsshim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemoryWorkingSetPrivateBytes |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemoryWorkingSetSharedBytes", wireType)
+			}
+			m.MemoryWorkingSetSharedBytes = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowHcsshim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemoryWorkingSetSharedBytes |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProcessID", wireType)
+			}
+			m.ProcessID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowHcsshim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ProcessID |= (uint32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field UserTime_100Ns", wireType)
+			}
+			m.UserTime_100Ns = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowHcsshim
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.UserTime_100Ns |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipHcsshim(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthHcsshim
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipHcsshim(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowHcsshim
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowHcsshim
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowHcsshim
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			iNdEx += length
+			if length < 0 {
+				return 0, ErrInvalidLengthHcsshim
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowHcsshim
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipHcsshim(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthHcsshim = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowHcsshim   = fmt.Errorf("proto: integer overflow")
+)
+
+func init() {
+	proto.RegisterFile("github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto", fileDescriptorHcsshim)
+}
+
+var fileDescriptorHcsshim = []byte{
+	// 479 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0x41, 0x6f, 0xd3, 0x30,
+	0x14, 0xc7, 0x1b, 0x36, 0xc6, 0x62, 0x54, 0x60, 0x86, 0x43, 0x28, 0x90, 0x54, 0xbb, 0x50, 0x09,
+	0x94, 0x74, 0x70, 0xe4, 0x44, 0x5a, 0x21, 0xed, 0x32, 0xa6, 0x0c, 0x09, 0x09, 0x21, 0x59, 0x6e,
+	0xf2, 0x48, 0xad, 0xd5, 0x71, 0x64, 0xbb, 0x54, 0xbd, 0xf1, 0x11, 0x38, 0xf2, 0x49, 0xf8, 0x0c,
+	0x3d, 0x72, 0xe4, 0x34, 0x58, 0x3e, 0x09, 0x8a, 0xed, 0x96, 0x51, 0x38, 0x71, 0xf3, 0xf3, 0xff,
+	0xf7, 0x7e, 0xaf, 0x7e, 0x0d, 0x1a, 0x95, 0x4c, 0x4f, 0xe7, 0x93, 0x38, 0x17, 0x3c, 0xc9, 0x45,
+	0xa5, 0x29, 0xab, 0x40, 0x16, 0x57, 0x8f, 0x0b, 0x56, 0x15, 0x62, 0xa1, 0x92, 0x69, 0xae, 0xd4,
+	0x94, 0x71, 0xbd, 0xac, 0x61, 0x53, 0xc4, 0xb5, 0x14, 0x5a, 0xe0, 0xde, 0x6f, 0x3c, 0x76, 0x78,
+	0xec, 0x88, 0xde, 0xbd, 0x52, 0x94, 0xc2, 0x60, 0x49, 0x7b, 0xb2, 0x1d, 0xbd, 0xb0, 0x14, 0xa2,
+	0x9c, 0x41, 0x62, 0xaa, 0xc9, 0xfc, 0x43, 0x52, 0xcc, 0x25, 0xd5, 0x4c, 0x54, 0x2e, 0x8f, 0xb6,
+	0x73, 0xcd, 0x38, 0x28, 0x4d, 0x79, 0x6d, 0x81, 0xc3, 0x1c, 0x75, 0x47, 0x12, 0xa8, 0x86, 0xd7,
+	0x75, 0xdb, 0xa6, 0x70, 0x86, 0xb0, 0x06, 0xc9, 0x59, 0x45, 0x35, 0x90, 0xb5, 0x2d, 0xf0, 0xfa,
+	0xde, 0xe0, 0xe6, 0xb3, 0xfb, 0xb1, 0xd5, 0xc5, 0x6b, 0x5d, 0x3c, 0x76, 0x40, 0xba, 0xbf, 0xba,
+	0x88, 0x3a, 0x5f, 0x7e, 0x44, 0x5e, 0x76, 0xb0, 0x69, 0x5f, 0x87, 0x87, 0x5f, 0x77, 0xd0, 0xad,
+	0x53, 0x29, 0x72, 0x50, 0x6a, 0x0c, 0x9a, 0xb2, 0x99, 0xc2, 0x8f, 0x10, 0x62, 0x9c, 0x96, 0x40,
+	0x2a, 0xca, 0xc1, 0xe8, 0xfd, 0xcc, 0x37, 0x37, 0x27, 0x94, 0x03, 0x1e, 0x21, 0x94, 0x9b, 0x9f,
+	0x55, 0x10, 0xaa, 0x83, 0x6b, 0x66, 0x7a, 0xef, 0xaf, 0xe9, 0x6f, 0xd6, 0x8f, 0xb1, 0xe3, 0x3f,
+	0xb7, 0xe3, 0x7d, 0xd7, 0xf7, 0x52, 0xe3, 0x27, 0x08, 0x9f, 0x83, 0xac, 0x60, 0x46, 0xda, 0x57,
+	0x93, 0xa3, 0xe1, 0x90, 0x54, 0x2a, 0xd8, 0xe9, 0x7b, 0x83, 0xdd, 0xec, 0xb6, 0x4d, 0x5a, 0xc3,
+	0xd1, 0x70, 0x78, 0xa2, 0x70, 0x8c, 0xee, 0x72, 0xe0, 0x42, 0x2e, 0x49, 0x2e, 0x38, 0x67, 0x9a,
+	0x4c, 0x96, 0x1a, 0x54, 0xb0, 0x6b, 0xe8, 0x03, 0x1b, 0x8d, 0x4c, 0x92, 0xb6, 0x01, 0x7e, 0x85,
+	0xfa, 0x8e, 0x5f, 0x08, 0x79, 0xce, 0xaa, 0x92, 0x28, 0xd0, 0xa4, 0x96, 0xec, 0x63, 0xbb, 0x38,
+	0xdb, 0x7c, 0xdd, 0x34, 0x3f, 0xb4, 0xdc, 0x5b, 0x8b, 0x9d, 0x81, 0x3e, 0xb5, 0x90, 0xf5, 0x8c,
+	0x51, 0xf4, 0x0f, 0x8f, 0x9a, 0x52, 0x09, 0x85, 0xd3, 0xec, 0x19, 0xcd, 0x83, 0x6d, 0xcd, 0x99,
+	0x61, 0xac, 0xe5, 0x29, 0x42, 0xb5, 0x5d, 0x30, 0x61, 0x45, 0x70, 0xa3, 0xef, 0x0d, 0xba, 0x69,
+	0xb7, 0xb9, 0x88, 0x7c, 0xb7, 0xf6, 0xe3, 0x71, 0xe6, 0x3b, 0xe0, 0xb8, 0xc0, 0x8f, 0xd1, 0x9d,
+	0xb9, 0x02, 0xf9, 0xc7, 0x5a, 0xf6, 0xcd, 0x90, 0x6e, 0x7b, 0xbf, 0x59, 0x4a, 0xfa, 0x7e, 0x75,
+	0x19, 0x76, 0xbe, 0x5f, 0x86, 0x9d, 0x4f, 0x4d, 0xe8, 0xad, 0x9a, 0xd0, 0xfb, 0xd6, 0x84, 0xde,
+	0xcf, 0x26, 0xf4, 0xde, 0xa5, 0xff, 0xf5, 0xbd, 0xbf, 0xb8, 0x5a, 0x4c, 0xf6, 0xcc, 0x1f, 0xf9,
+	0xfc, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x22, 0xad, 0xdf, 0xf8, 0x3c, 0x03, 0x00, 0x00,
+}
diff --git a/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto
new file mode 100644
index 0000000..7fcc054
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/windows/hcsshimtypes/hcsshim.proto
@@ -0,0 +1,26 @@
+syntax = "proto3";
+
+package containerd.windows.hcsshim;
+
+import "gogoproto/gogo.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+
+option go_package = "github.com/containerd/containerd/windows/hcsshimtypes;hcsshimtypes";
+
+message CreateOptions {
+	google.protobuf.Duration terminate_duration = 1 [(gogoproto.stdduration) = true, (gogoproto.nullable) = false];
+}
+
+// ProcessDetails contains additional information about a process
+// ProcessDetails is made of the same fields as found in hcsshim.ProcessListItem
+message ProcessDetails {
+	string image_name = 1;
+	google.protobuf.Timestamp created_at = 2 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
+	uint64 kernel_time_100_ns = 3;
+	uint64 memory_commit_bytes = 4;
+	uint64 memory_working_set_private_bytes = 5;
+	uint64 memory_working_set_shared_bytes = 6;
+	uint32 process_id = 7;
+	uint64 user_time_100_ns = 8;
+}
diff --git a/vendor/github.com/stevvooe/continuity/LICENSE b/vendor/github.com/containerd/continuity/LICENSE
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/LICENSE
rename to vendor/github.com/containerd/continuity/LICENSE
diff --git a/vendor/github.com/containerd/continuity/README.md b/vendor/github.com/containerd/continuity/README.md
new file mode 100644
index 0000000..0e91ce0
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/README.md
@@ -0,0 +1,74 @@
+# continuity
+
+[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity)
+[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity)
+
+A transport-agnostic, filesystem metadata manifest system
+
+This project is a staging area for experiments in providing transport agnostic
+metadata storage.
+
+Please see https://github.com/opencontainers/specs/issues/11 for more details.
+
+## Manifest Format
+
+A continuity manifest encodes filesystem metadata in Protocol Buffers.
+Please refer to [proto/manifest.proto](proto/manifest.proto).
+
+## Usage
+
+Build:
+
+```console
+$ make
+```
+
+Create a manifest (of this repo itself):
+
+```console
+$ ./bin/continuity build . > /tmp/a.pb
+```
+
+Dump a manifest:
+
+```console
+$ ./bin/continuity ls /tmp/a.pb
+...
+-rw-rw-r--      270 B   /.gitignore
+-rw-rw-r--      88 B    /.mailmap
+-rw-rw-r--      187 B   /.travis.yml
+-rw-rw-r--      359 B   /AUTHORS
+-rw-rw-r--      11 kB   /LICENSE
+-rw-rw-r--      1.5 kB  /Makefile
+...
+-rw-rw-r--      986 B   /testutil_test.go
+drwxrwxr-x      0 B     /version
+-rw-rw-r--      478 B   /version/version.go
+```
+
+Verify a manifest:
+
+```console
+$ ./bin/continuity verify . /tmp/a.pb
+```
+
+Break the directory and restore using the manifest:
+```console
+$ chmod 777 Makefile
+$ ./bin/continuity verify . /tmp/a.pb
+2017/06/23 08:00:34 error verifying manifest: resource "/Makefile" has incorrect mode: -rwxrwxrwx != -rw-rw-r--
+$ ./bin/continuity apply . /tmp/a.pb
+$ stat -c %a Makefile
+664
+$ ./bin/continuity verify . /tmp/a.pb
+```
+
+
+## Contribution Guide
+### Building Proto Package
+
+If you change the proto file you will need to rebuild the generated Go with `go generate`.
+
+```console
+$ go generate ./proto
+```
diff --git a/vendor/github.com/containerd/continuity/devices/devices.go b/vendor/github.com/containerd/continuity/devices/devices.go
new file mode 100644
index 0000000..7086407
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/devices/devices.go
@@ -0,0 +1,5 @@
+package devices
+
+import "fmt"
+
+var ErrNotSupported = fmt.Errorf("not supported")
diff --git a/vendor/github.com/containerd/continuity/devices/devices_darwin.go b/vendor/github.com/containerd/continuity/devices/devices_darwin.go
new file mode 100644
index 0000000..5041e66
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/devices/devices_darwin.go
@@ -0,0 +1,15 @@
+package devices
+
+// from /usr/include/sys/types.h
+
+func getmajor(dev int32) uint64 {
+	return (uint64(dev) >> 24) & 0xff
+}
+
+func getminor(dev int32) uint64 {
+	return uint64(dev) & 0xffffff
+}
+
+func makedev(major int, minor int) int {
+	return ((major << 24) | minor)
+}
diff --git a/vendor/github.com/containerd/continuity/devices/devices_dummy.go b/vendor/github.com/containerd/continuity/devices/devices_dummy.go
new file mode 100644
index 0000000..9a48330
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/devices/devices_dummy.go
@@ -0,0 +1,23 @@
+// +build solaris,!cgo
+
+//
+// Implementing the functions below requires cgo support.  Non-cgo stubs
+// versions are defined below to enable cross-compilation of source code
+// that depends on these functions, but the resultant cross-compiled
+// binaries cannot actually be used.  If the stub function(s) below are
+// actually invoked they will cause the calling process to exit.
+//
+
+package devices
+
+func getmajor(dev uint64) uint64 {
+	panic("getmajor() support requires cgo.")
+}
+
+func getminor(dev uint64) uint64 {
+	panic("getminor() support requires cgo.")
+}
+
+func makedev(major int, minor int) int {
+	panic("makedev() support requires cgo.")
+}
diff --git a/vendor/github.com/containerd/continuity/devices/devices_freebsd.go b/vendor/github.com/containerd/continuity/devices/devices_freebsd.go
new file mode 100644
index 0000000..a5c7b93
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/devices/devices_freebsd.go
@@ -0,0 +1,15 @@
+package devices
+
+// from /usr/include/sys/types.h
+
+func getmajor(dev uint32) uint64 {
+	return (uint64(dev) >> 24) & 0xff
+}
+
+func getminor(dev uint32) uint64 {
+	return uint64(dev) & 0xffffff
+}
+
+func makedev(major int, minor int) int {
+	return ((major << 24) | minor)
+}
diff --git a/vendor/github.com/containerd/continuity/devices/devices_linux.go b/vendor/github.com/containerd/continuity/devices/devices_linux.go
new file mode 100644
index 0000000..454cf66
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/devices/devices_linux.go
@@ -0,0 +1,15 @@
+package devices
+
+// from /usr/include/linux/kdev_t.h
+
+func getmajor(dev uint64) uint64 {
+	return dev >> 8
+}
+
+func getminor(dev uint64) uint64 {
+	return dev & 0xff
+}
+
+func makedev(major int, minor int) int {
+	return ((major << 8) | minor)
+}
diff --git a/vendor/github.com/containerd/continuity/devices/devices_solaris.go b/vendor/github.com/containerd/continuity/devices/devices_solaris.go
new file mode 100644
index 0000000..8819ac8
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/devices/devices_solaris.go
@@ -0,0 +1,18 @@
+// +build cgo
+
+package devices
+
+//#include <sys/mkdev.h>
+import "C"
+
+func getmajor(dev uint64) uint64 {
+	return uint64(C.major(C.dev_t(dev)))
+}
+
+func getminor(dev uint64) uint64 {
+	return uint64(C.minor(C.dev_t(dev)))
+}
+
+func makedev(major int, minor int) int {
+	return int(C.makedev(C.major_t(major), C.minor_t(minor)))
+}
diff --git a/vendor/github.com/containerd/continuity/devices/devices_unix.go b/vendor/github.com/containerd/continuity/devices/devices_unix.go
new file mode 100644
index 0000000..85e9a68
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/devices/devices_unix.go
@@ -0,0 +1,55 @@
+// +build linux darwin freebsd solaris
+
+package devices
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+)
+
+func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
+	sys, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return 0, 0, fmt.Errorf("cannot extract device from os.FileInfo")
+	}
+
+	return getmajor(sys.Rdev), getminor(sys.Rdev), nil
+}
+
+// Mknod provides a shortcut for syscall.Mknod
+func Mknod(p string, mode os.FileMode, maj, min int) error {
+	var (
+		m   = syscallMode(mode.Perm())
+		dev int
+	)
+
+	if mode&os.ModeDevice != 0 {
+		dev = makedev(maj, min)
+
+		if mode&os.ModeCharDevice != 0 {
+			m |= syscall.S_IFCHR
+		} else {
+			m |= syscall.S_IFBLK
+		}
+	} else if mode&os.ModeNamedPipe != 0 {
+		m |= syscall.S_IFIFO
+	}
+
+	return syscall.Mknod(p, m, dev)
+}
+
+// syscallMode returns the syscall-specific mode bits from Go's portable mode bits.
+func syscallMode(i os.FileMode) (o uint32) {
+	o |= uint32(i.Perm())
+	if i&os.ModeSetuid != 0 {
+		o |= syscall.S_ISUID
+	}
+	if i&os.ModeSetgid != 0 {
+		o |= syscall.S_ISGID
+	}
+	if i&os.ModeSticky != 0 {
+		o |= syscall.S_ISVTX
+	}
+	return
+}
diff --git a/vendor/github.com/containerd/continuity/devices/devices_windows.go b/vendor/github.com/containerd/continuity/devices/devices_windows.go
new file mode 100644
index 0000000..6099d1d
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/devices/devices_windows.go
@@ -0,0 +1,11 @@
+package devices
+
+import (
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+func DeviceInfo(fi os.FileInfo) (uint64, uint64, error) {
+	return 0, 0, errors.Wrap(ErrNotSupported, "cannot get device info on windows")
+}
diff --git a/vendor/github.com/containerd/continuity/driver/driver.go b/vendor/github.com/containerd/continuity/driver/driver.go
new file mode 100644
index 0000000..aa1dd7d
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/driver/driver.go
@@ -0,0 +1,162 @@
+package driver
+
+import (
+	"fmt"
+	"io"
+	"os"
+)
+
+var ErrNotSupported = fmt.Errorf("not supported")
+
+// Driver provides all of the system-level functions in a common interface.
+// The context should call these with full paths and should never use the `os`
+// package or any other package to access resources on the filesystem. This
+// mechanism lets us carefully control access to the context and maintain
+// path and resource integrity. It also gives us an interface to reason about
+// direct resource access.
+//
+// Implementations don't need to do much other than meet the interface. For
+// example, it is not required to wrap os.FileInfo to return correct paths for
+// the call to Name().
+type Driver interface {
+	// Note that Open() returns a File interface instead of *os.File. This
+	// is because os.File is a struct, so if Open was to return *os.File,
+	// the only way to fulfill the interface would be to call os.Open()
+	Open(path string) (File, error)
+	OpenFile(path string, flag int, perm os.FileMode) (File, error)
+
+	Stat(path string) (os.FileInfo, error)
+	Lstat(path string) (os.FileInfo, error)
+	Readlink(p string) (string, error)
+	Mkdir(path string, mode os.FileMode) error
+	Remove(path string) error
+
+	Link(oldname, newname string) error
+	Lchmod(path string, mode os.FileMode) error
+	Lchown(path string, uid, gid int64) error
+	Symlink(oldname, newname string) error
+
+	MkdirAll(path string, perm os.FileMode) error
+	RemoveAll(path string) error
+
+	// TODO(aaronl): These methods might move outside the main Driver
+	// interface in the future as more platforms are added.
+	Mknod(path string, mode os.FileMode, major int, minor int) error
+	Mkfifo(path string, mode os.FileMode) error
+}
+
+// File is the interface for interacting with files returned by continuity's Open
+// This is needed since os.File is a struct, instead of an interface, so it can't
+// be used.
+type File interface {
+	io.ReadWriteCloser
+	io.Seeker
+	Readdir(n int) ([]os.FileInfo, error)
+}
+
+func NewSystemDriver() (Driver, error) {
+	// TODO(stevvooe): Consider having this take a "hint" path argument, which
+	// would be the context root. The hint could be used to resolve required
+	// filesystem support when assembling the driver to use.
+	return &driver{}, nil
+}
+
+// XAttrDriver should be implemented on operating systems and filesystems that
+// have xattr support for regular files and directories.
+type XAttrDriver interface {
+	// Getxattr returns all of the extended attributes for the file at path.
+	// Typically, this takes a syscall call to Listxattr and Getxattr.
+	Getxattr(path string) (map[string][]byte, error)
+
+	// Setxattr sets all of the extended attributes on file at path, following
+	// any symbolic links, if necessary. All attributes on the target are
+	// replaced by the values from attr. If the operation fails to set any
+	// attribute, those already applied will not be rolled back.
+	Setxattr(path string, attr map[string][]byte) error
+}
+
+// LXAttrDriver should be implemented by drivers on operating systems and
+// filesystems that support setting and getting extended attributes on
+// symbolic links. If this is not implemented, extended attributes will be
+// ignored on symbolic links.
+type LXAttrDriver interface {
+	// LGetxattr returns all of the extended attributes for the file at path
+	// and does not follow symlinks. Typically, this takes a syscall call to
+	// Llistxattr and Lgetxattr.
+	LGetxattr(path string) (map[string][]byte, error)
+
+	// LSetxattr sets all of the extended attributes on file at path, without
+	// following symbolic links. All attributes on the target are replaced by
+	// the values from attr. If the operation fails to set any attribute,
+	// those already applied will not be rolled back.
+	LSetxattr(path string, attr map[string][]byte) error
+}
+
+type DeviceInfoDriver interface {
+	DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error)
+}
+
+// driver is a simple default implementation that sends calls out to the "os"
+// package. Extend the "driver" type in system-specific files to add support,
+// such as xattrs, which can add support at compile time.
+type driver struct{}
+
+var _ File = &os.File{}
+
+// LocalDriver is the exported Driver struct for convenience.
+var LocalDriver Driver = &driver{}
+
+func (d *driver) Open(p string) (File, error) {
+	return os.Open(p)
+}
+
+func (d *driver) OpenFile(path string, flag int, perm os.FileMode) (File, error) {
+	return os.OpenFile(path, flag, perm)
+}
+
+func (d *driver) Stat(p string) (os.FileInfo, error) {
+	return os.Stat(p)
+}
+
+func (d *driver) Lstat(p string) (os.FileInfo, error) {
+	return os.Lstat(p)
+}
+
+func (d *driver) Readlink(p string) (string, error) {
+	return os.Readlink(p)
+}
+
+func (d *driver) Mkdir(p string, mode os.FileMode) error {
+	return os.Mkdir(p, mode)
+}
+
+// Remove is used to unlink files and remove directories.
+// This is following the golang os package api which
+// combines the operations into a higher level Remove
+// function. If explicit unlinking or directory removal
+// to mirror system call is required, they should be
+// split up at that time.
+func (d *driver) Remove(path string) error {
+	return os.Remove(path)
+}
+
+func (d *driver) Link(oldname, newname string) error {
+	return os.Link(oldname, newname)
+}
+
+func (d *driver) Lchown(name string, uid, gid int64) error {
+	// TODO: error out if uid exceeds int bit width?
+	return os.Lchown(name, int(uid), int(gid))
+}
+
+func (d *driver) Symlink(oldname, newname string) error {
+	return os.Symlink(oldname, newname)
+}
+
+func (d *driver) MkdirAll(path string, perm os.FileMode) error {
+	return os.MkdirAll(path, perm)
+}
+
+func (d *driver) RemoveAll(path string) error {
+	return os.RemoveAll(path)
+}
diff --git a/vendor/github.com/containerd/continuity/driver/driver_unix.go b/vendor/github.com/containerd/continuity/driver/driver_unix.go
new file mode 100644
index 0000000..d9ab165
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/driver/driver_unix.go
@@ -0,0 +1,122 @@
+// +build linux darwin freebsd solaris
+
+package driver
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+	"sort"
+
+	"github.com/containerd/continuity/devices"
+	"github.com/containerd/continuity/sysx"
+)
+
+func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
+	return devices.Mknod(path, mode, major, minor)
+}
+
+func (d *driver) Mkfifo(path string, mode os.FileMode) error {
+	if mode&os.ModeNamedPipe == 0 {
+		return errors.New("mode passed to Mkfifo does not have the named pipe bit set")
+	}
+	// mknod with a mode that has ModeNamedPipe set creates a fifo, not a
+	// device.
+	return devices.Mknod(path, mode, 0, 0)
+}
+
+// Lchmod changes the mode of a file not following symlinks.
+func (d *driver) Lchmod(path string, mode os.FileMode) (err error) {
+	if !filepath.IsAbs(path) {
+		path, err = filepath.Abs(path)
+		if err != nil {
+			return
+		}
+	}
+
+	return sysx.Fchmodat(0, path, uint32(mode), sysx.AtSymlinkNofollow)
+}
+
+// Getxattr returns all of the extended attributes for the file at path p.
+func (d *driver) Getxattr(p string) (map[string][]byte, error) {
+	xattrs, err := sysx.Listxattr(p)
+	if err != nil {
+		return nil, fmt.Errorf("listing %s xattrs: %v", p, err)
+	}
+
+	sort.Strings(xattrs)
+	m := make(map[string][]byte, len(xattrs))
+
+	for _, attr := range xattrs {
+		value, err := sysx.Getxattr(p, attr)
+		if err != nil {
+			return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err)
+		}
+
+		// NOTE(stevvooe): This append/copy trick relies on unique
+		// xattrs. Break this out into an alloc/copy if xattrs are no
+		// longer unique.
+		m[attr] = append(m[attr], value...)
+	}
+
+	return m, nil
+}
+
+// Setxattr sets all of the extended attributes on file at path, following
+// any symbolic links, if necessary. All attributes on the target are
+// replaced by the values from attr. If the operation fails to set any
+// attribute, those already applied will not be rolled back.
+func (d *driver) Setxattr(path string, attrMap map[string][]byte) error {
+	for attr, value := range attrMap {
+		if err := sysx.Setxattr(path, attr, value, 0); err != nil {
+			return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err)
+		}
+	}
+
+	return nil
+}
+
+// LGetxattr returns all of the extended attributes for the file at path p
+// not following symbolic links.
+func (d *driver) LGetxattr(p string) (map[string][]byte, error) {
+	xattrs, err := sysx.LListxattr(p)
+	if err != nil {
+		return nil, fmt.Errorf("listing %s xattrs: %v", p, err)
+	}
+
+	sort.Strings(xattrs)
+	m := make(map[string][]byte, len(xattrs))
+
+	for _, attr := range xattrs {
+		value, err := sysx.LGetxattr(p, attr)
+		if err != nil {
+			return nil, fmt.Errorf("getting %q xattr on %s: %v", attr, p, err)
+		}
+
+		// NOTE(stevvooe): This append/copy trick relies on unique
+		// xattrs. Break this out into an alloc/copy if xattrs are no
+		// longer unique.
+		m[attr] = append(m[attr], value...)
+	}
+
+	return m, nil
+}
+
+// LSetxattr sets all of the extended attributes on file at path, not
+// following any symbolic links. All attributes on the target are
+// replaced by the values from attr. If the operation fails to set any
+// attribute, those already applied will not be rolled back.
+func (d *driver) LSetxattr(path string, attrMap map[string][]byte) error {
+	for attr, value := range attrMap {
+		if err := sysx.LSetxattr(path, attr, value, 0); err != nil {
+			return fmt.Errorf("error setting xattr %q on %s: %v", attr, path, err)
+		}
+	}
+
+	return nil
+}
+
+func (d *driver) DeviceInfo(fi os.FileInfo) (maj uint64, min uint64, err error) {
+	return devices.DeviceInfo(fi)
+}
diff --git a/vendor/github.com/containerd/continuity/driver/driver_windows.go b/vendor/github.com/containerd/continuity/driver/driver_windows.go
new file mode 100644
index 0000000..e4cfa64
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/driver/driver_windows.go
@@ -0,0 +1,21 @@
+package driver
+
+import (
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+func (d *driver) Mknod(path string, mode os.FileMode, major, minor int) error {
+	return errors.Wrap(ErrNotSupported, "cannot create device node on Windows")
+}
+
+func (d *driver) Mkfifo(path string, mode os.FileMode) error {
+	return errors.Wrap(ErrNotSupported, "cannot create fifo on Windows")
+}
+
+// Lchmod changes the mode of a file not following symlinks.
+func (d *driver) Lchmod(path string, mode os.FileMode) (err error) {
+	// TODO: Use Windows' equivalent
+	return os.Chmod(path, mode)
+}
diff --git a/vendor/github.com/containerd/continuity/driver/utils.go b/vendor/github.com/containerd/continuity/driver/utils.go
new file mode 100644
index 0000000..9e0edd7
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/driver/utils.go
@@ -0,0 +1,74 @@
+package driver
+
+import (
+	"io"
+	"io/ioutil"
+	"os"
+	"sort"
+)
+
+// ReadFile works the same as ioutil.ReadFile with the Driver abstraction
+func ReadFile(r Driver, filename string) ([]byte, error) {
+	f, err := r.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	data, err := ioutil.ReadAll(f)
+	if err != nil {
+		return nil, err
+	}
+
+	return data, nil
+}
+
+// WriteFile works the same as ioutil.WriteFile with the Driver abstraction
+func WriteFile(r Driver, filename string, data []byte, perm os.FileMode) error {
+	f, err := r.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	n, err := f.Write(data)
+	if err != nil {
+		return err
+	} else if n != len(data) {
+		return io.ErrShortWrite
+	}
+
+	return nil
+}
+
+// ReadDir works the same as ioutil.ReadDir with the Driver abstraction
+func ReadDir(r Driver, dirname string) ([]os.FileInfo, error) {
+	f, err := r.Open(dirname)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	dirs, err := f.Readdir(-1)
+	if err != nil {
+		return nil, err
+	}
+
+	sort.Sort(fileInfos(dirs))
+	return dirs, nil
+}
+
+// Simple implementation of the sort.Interface for os.FileInfo
+type fileInfos []os.FileInfo
+
+func (fis fileInfos) Len() int {
+	return len(fis)
+}
+
+func (fis fileInfos) Less(i, j int) bool {
+	return fis[i].Name() < fis[j].Name()
+}
+
+func (fis fileInfos) Swap(i, j int) {
+	fis[i], fis[j] = fis[j], fis[i]
+}
diff --git a/vendor/github.com/containerd/continuity/pathdriver/path_driver.go b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
new file mode 100644
index 0000000..b43d55f
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/pathdriver/path_driver.go
@@ -0,0 +1,85 @@
+package pathdriver
+
+import (
+	"path/filepath"
+)
+
+// PathDriver provides all of the path manipulation functions in a common
+// interface. The context should call these and never use the `filepath`
+// package or any other package to manipulate paths.
+type PathDriver interface {
+	Join(paths ...string) string
+	IsAbs(path string) bool
+	Rel(base, target string) (string, error)
+	Base(path string) string
+	Dir(path string) string
+	Clean(path string) string
+	Split(path string) (dir, file string)
+	Separator() byte
+	Abs(path string) (string, error)
+	Walk(string, filepath.WalkFunc) error
+	FromSlash(path string) string
+	ToSlash(path string) string
+	Match(pattern, name string) (matched bool, err error)
+}
+
+// pathDriver is a simple default implementation that calls the filepath package.
+type pathDriver struct{}
+
+// LocalPathDriver is the exported pathDriver struct for convenience.
+var LocalPathDriver PathDriver = &pathDriver{}
+
+func (*pathDriver) Join(paths ...string) string {
+	return filepath.Join(paths...)
+}
+
+func (*pathDriver) IsAbs(path string) bool {
+	return filepath.IsAbs(path)
+}
+
+func (*pathDriver) Rel(base, target string) (string, error) {
+	return filepath.Rel(base, target)
+}
+
+func (*pathDriver) Base(path string) string {
+	return filepath.Base(path)
+}
+
+func (*pathDriver) Dir(path string) string {
+	return filepath.Dir(path)
+}
+
+func (*pathDriver) Clean(path string) string {
+	return filepath.Clean(path)
+}
+
+func (*pathDriver) Split(path string) (dir, file string) {
+	return filepath.Split(path)
+}
+
+func (*pathDriver) Separator() byte {
+	return filepath.Separator
+}
+
+func (*pathDriver) Abs(path string) (string, error) {
+	return filepath.Abs(path)
+}
+
+// Note that filepath.Walk calls os.Stat, so if the context wants
+// to call Driver.Stat() for Walk, they need to create a new struct that
+// overrides this method.
+func (*pathDriver) Walk(root string, walkFn filepath.WalkFunc) error {
+	return filepath.Walk(root, walkFn)
+}
+
+func (*pathDriver) FromSlash(path string) string {
+	return filepath.FromSlash(path)
+}
+
+func (*pathDriver) ToSlash(path string) string {
+	return filepath.ToSlash(path)
+}
+
+func (*pathDriver) Match(pattern, name string) (bool, error) {
+	return filepath.Match(pattern, name)
+}
diff --git a/vendor/github.com/stevvooe/continuity/sysx/asm.s b/vendor/github.com/containerd/continuity/sysx/asm.s
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/asm.s
rename to vendor/github.com/containerd/continuity/sysx/asm.s
diff --git a/vendor/github.com/stevvooe/continuity/sysx/chmod_darwin.go b/vendor/github.com/containerd/continuity/sysx/chmod_darwin.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/chmod_darwin.go
rename to vendor/github.com/containerd/continuity/sysx/chmod_darwin.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/chmod_darwin_386.go b/vendor/github.com/containerd/continuity/sysx/chmod_darwin_386.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/chmod_darwin_386.go
rename to vendor/github.com/containerd/continuity/sysx/chmod_darwin_386.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/chmod_darwin_amd64.go b/vendor/github.com/containerd/continuity/sysx/chmod_darwin_amd64.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/chmod_darwin_amd64.go
rename to vendor/github.com/containerd/continuity/sysx/chmod_darwin_amd64.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/chmod_freebsd.go b/vendor/github.com/containerd/continuity/sysx/chmod_freebsd.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/chmod_freebsd.go
rename to vendor/github.com/containerd/continuity/sysx/chmod_freebsd.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/chmod_freebsd_amd64.go b/vendor/github.com/containerd/continuity/sysx/chmod_freebsd_amd64.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/chmod_freebsd_amd64.go
rename to vendor/github.com/containerd/continuity/sysx/chmod_freebsd_amd64.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/chmod_linux.go b/vendor/github.com/containerd/continuity/sysx/chmod_linux.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/chmod_linux.go
rename to vendor/github.com/containerd/continuity/sysx/chmod_linux.go
diff --git a/vendor/github.com/containerd/continuity/sysx/chmod_solaris.go b/vendor/github.com/containerd/continuity/sysx/chmod_solaris.go
new file mode 100644
index 0000000..3ba6e5e
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/chmod_solaris.go
@@ -0,0 +1,11 @@
+package sysx
+
+import "golang.org/x/sys/unix"
+
+const (
+	AtSymlinkNofollow = unix.AT_SYMLINK_NOFOLLOW
+)
+
+func Fchmodat(dirfd int, path string, mode uint32, flags int) error {
+	return unix.Fchmodat(dirfd, path, mode, flags)
+}
diff --git a/vendor/github.com/containerd/continuity/sysx/copy_linux.go b/vendor/github.com/containerd/continuity/sysx/copy_linux.go
new file mode 100644
index 0000000..4d85812
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/copy_linux.go
@@ -0,0 +1,11 @@
+package sysx
+
+// These functions will be generated by generate.sh
+//    $ GOOS=linux GOARCH=386 ./generate.sh copy
+//    $ GOOS=linux GOARCH=amd64 ./generate.sh copy
+//    $ GOOS=linux GOARCH=arm ./generate.sh copy
+//    $ GOOS=linux GOARCH=arm64 ./generate.sh copy
+//    $ GOOS=linux GOARCH=ppc64le ./generate.sh copy
+//    $ GOOS=linux GOARCH=s390x ./generate.sh copy
+
+//sys CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error)
diff --git a/vendor/github.com/stevvooe/continuity/sysx/copy_linux_386.go b/vendor/github.com/containerd/continuity/sysx/copy_linux_386.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/copy_linux_386.go
rename to vendor/github.com/containerd/continuity/sysx/copy_linux_386.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/copy_linux_amd64.go b/vendor/github.com/containerd/continuity/sysx/copy_linux_amd64.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/copy_linux_amd64.go
rename to vendor/github.com/containerd/continuity/sysx/copy_linux_amd64.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/copy_linux_arm.go b/vendor/github.com/containerd/continuity/sysx/copy_linux_arm.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/copy_linux_arm.go
rename to vendor/github.com/containerd/continuity/sysx/copy_linux_arm.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/copy_linux_arm64.go b/vendor/github.com/containerd/continuity/sysx/copy_linux_arm64.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/copy_linux_arm64.go
rename to vendor/github.com/containerd/continuity/sysx/copy_linux_arm64.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/copy_linux_amd64.go b/vendor/github.com/containerd/continuity/sysx/copy_linux_ppc64le.go
similarity index 100%
copy from vendor/github.com/stevvooe/continuity/sysx/copy_linux_amd64.go
copy to vendor/github.com/containerd/continuity/sysx/copy_linux_ppc64le.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/copy_linux_amd64.go b/vendor/github.com/containerd/continuity/sysx/copy_linux_s390x.go
similarity index 100%
copy from vendor/github.com/stevvooe/continuity/sysx/copy_linux_amd64.go
copy to vendor/github.com/containerd/continuity/sysx/copy_linux_s390x.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/nodata_linux.go b/vendor/github.com/containerd/continuity/sysx/nodata_linux.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/nodata_linux.go
rename to vendor/github.com/containerd/continuity/sysx/nodata_linux.go
diff --git a/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go
new file mode 100644
index 0000000..53cc8e0
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/nodata_solaris.go
@@ -0,0 +1,8 @@
+package sysx
+
+import (
+	"syscall"
+)
+
+// This should actually be a set that contains ENOENT and EPERM
+const ENODATA = syscall.ENOENT
diff --git a/vendor/github.com/stevvooe/continuity/sysx/nodata_unix.go b/vendor/github.com/containerd/continuity/sysx/nodata_unix.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/nodata_unix.go
rename to vendor/github.com/containerd/continuity/sysx/nodata_unix.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/sys.go b/vendor/github.com/containerd/continuity/sysx/sys.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/sys.go
rename to vendor/github.com/containerd/continuity/sysx/sys.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_386.go b/vendor/github.com/containerd/continuity/sysx/sysnum_linux_386.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_386.go
rename to vendor/github.com/containerd/continuity/sysx/sysnum_linux_386.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_amd64.go b/vendor/github.com/containerd/continuity/sysx/sysnum_linux_amd64.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_amd64.go
rename to vendor/github.com/containerd/continuity/sysx/sysnum_linux_amd64.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_arm.go b/vendor/github.com/containerd/continuity/sysx/sysnum_linux_arm.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_arm.go
rename to vendor/github.com/containerd/continuity/sysx/sysnum_linux_arm.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_arm64.go b/vendor/github.com/containerd/continuity/sysx/sysnum_linux_arm64.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/sysnum_linux_arm64.go
rename to vendor/github.com/containerd/continuity/sysx/sysnum_linux_arm64.go
diff --git a/vendor/github.com/containerd/continuity/sysx/sysnum_linux_ppc64le.go b/vendor/github.com/containerd/continuity/sysx/sysnum_linux_ppc64le.go
new file mode 100644
index 0000000..5dea25a
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/sysnum_linux_ppc64le.go
@@ -0,0 +1,7 @@
+package sysx
+
+const (
+	// SYS_COPY_FILE_RANGE defined in kernel 4.5+
+	// Number defined in /usr/include/asm/unistd_64.h
+	SYS_COPY_FILE_RANGE = 379
+)
diff --git a/vendor/github.com/containerd/continuity/sysx/sysnum_linux_s390x.go b/vendor/github.com/containerd/continuity/sysx/sysnum_linux_s390x.go
new file mode 100644
index 0000000..8a6f2a7
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/sysnum_linux_s390x.go
@@ -0,0 +1,7 @@
+package sysx
+
+const (
+	// SYS_COPY_FILE_RANGE defined in kernel 4.5+
+	// Number defined in /usr/include/asm/unistd_64.h
+	SYS_COPY_FILE_RANGE = 375
+)
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr.go b/vendor/github.com/containerd/continuity/sysx/xattr.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/xattr.go
rename to vendor/github.com/containerd/continuity/sysx/xattr.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr_darwin.go b/vendor/github.com/containerd/continuity/sysx/xattr_darwin.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/xattr_darwin.go
rename to vendor/github.com/containerd/continuity/sysx/xattr_darwin.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr_darwin_386.go b/vendor/github.com/containerd/continuity/sysx/xattr_darwin_386.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/xattr_darwin_386.go
rename to vendor/github.com/containerd/continuity/sysx/xattr_darwin_386.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr_darwin_amd64.go b/vendor/github.com/containerd/continuity/sysx/xattr_darwin_amd64.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/xattr_darwin_amd64.go
rename to vendor/github.com/containerd/continuity/sysx/xattr_darwin_amd64.go
diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_freebsd.go b/vendor/github.com/containerd/continuity/sysx/xattr_freebsd.go
new file mode 100644
index 0000000..e8017d3
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/xattr_freebsd.go
@@ -0,0 +1,12 @@
+package sysx
+
+import (
+	"errors"
+)
+
+// Initial stub version for FreeBSD. FreeBSD has a different
+// syscall API from Darwin and Linux for extended attributes;
+// it is also not widely used. It is not exposed at all by the
+// Go syscall package, so we need to implement directly eventually.
+
+var unsupported = errors.New("extended attributes unsupported on FreeBSD")
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr_linux.go b/vendor/github.com/containerd/continuity/sysx/xattr_linux.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/xattr_linux.go
rename to vendor/github.com/containerd/continuity/sysx/xattr_linux.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr_linux_386.go b/vendor/github.com/containerd/continuity/sysx/xattr_linux_386.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/xattr_linux_386.go
rename to vendor/github.com/containerd/continuity/sysx/xattr_linux_386.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr_linux_amd64.go b/vendor/github.com/containerd/continuity/sysx/xattr_linux_amd64.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/xattr_linux_amd64.go
rename to vendor/github.com/containerd/continuity/sysx/xattr_linux_amd64.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr_linux_arm.go b/vendor/github.com/containerd/continuity/sysx/xattr_linux_arm.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/xattr_linux_arm.go
rename to vendor/github.com/containerd/continuity/sysx/xattr_linux_arm.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr_linux_arm64.go b/vendor/github.com/containerd/continuity/sysx/xattr_linux_arm64.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/xattr_linux_arm64.go
rename to vendor/github.com/containerd/continuity/sysx/xattr_linux_arm64.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr_linux_ppc64.go b/vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/xattr_linux_ppc64.go
rename to vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr_linux_ppc64le.go b/vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64le.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/xattr_linux_ppc64le.go
rename to vendor/github.com/containerd/continuity/sysx/xattr_linux_ppc64le.go
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr_linux_s390x.go b/vendor/github.com/containerd/continuity/sysx/xattr_linux_s390x.go
similarity index 100%
rename from vendor/github.com/stevvooe/continuity/sysx/xattr_linux_s390x.go
rename to vendor/github.com/containerd/continuity/sysx/xattr_linux_s390x.go
diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_solaris.go b/vendor/github.com/containerd/continuity/sysx/xattr_solaris.go
new file mode 100644
index 0000000..fc523fc
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/xattr_solaris.go
@@ -0,0 +1,12 @@
+package sysx
+
+import (
+	"errors"
+)
+
+// Initial stub version for Solaris. Solaris has a different
+// syscall API from Darwin and Linux for extended attributes;
+// it is also not widely used. It is not exposed at all by the
+// Go syscall package, so we need to implement directly eventually.
+
+var unsupported = errors.New("extended attributes unsupported on Solaris")
diff --git a/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go
new file mode 100644
index 0000000..a8dd9f2
--- /dev/null
+++ b/vendor/github.com/containerd/continuity/sysx/xattr_unsupported.go
@@ -0,0 +1,44 @@
+// +build freebsd solaris
+
+package sysx
+
+// Listxattr calls syscall listxattr and reads all content
+// and returns a string array
+func Listxattr(path string) ([]string, error) {
+	return []string{}, nil
+}
+
+// Removexattr calls syscall removexattr
+func Removexattr(path string, attr string) (err error) {
+	return unsupported
+}
+
+// Setxattr calls syscall setxattr
+func Setxattr(path string, attr string, data []byte, flags int) (err error) {
+	return unsupported
+}
+
+// Getxattr calls syscall getxattr
+func Getxattr(path, attr string) ([]byte, error) {
+	return []byte{}, unsupported
+}
+
+// LListxattr lists xattrs, not following symlinks
+func LListxattr(path string) ([]string, error) {
+	return []string{}, nil
+}
+
+// LRemovexattr removes an xattr, not following symlinks
+func LRemovexattr(path string, attr string) (err error) {
+	return unsupported
+}
+
+// LSetxattr sets an xattr, not following symlinks
+func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
+	return unsupported
+}
+
+// LGetxattr gets an xattr, not following symlinks
+func LGetxattr(path, attr string) ([]byte, error) {
+	return []byte{}, nil
+}
diff --git a/vendor/github.com/containerd/fifo/LICENSE b/vendor/github.com/containerd/fifo/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/containerd/fifo/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containerd/fifo/fifo.go b/vendor/github.com/containerd/fifo/fifo.go
new file mode 100644
index 0000000..8d9bc6a
--- /dev/null
+++ b/vendor/github.com/containerd/fifo/fifo.go
@@ -0,0 +1,220 @@
+package fifo
+
+import (
+	"io"
+	"os"
+	"runtime"
+	"sync"
+	"syscall"
+
+	"github.com/pkg/errors"
+	"golang.org/x/net/context"
+)
+
+type fifo struct {
+	flag        int
+	opened      chan struct{}
+	closed      chan struct{}
+	closing     chan struct{}
+	err         error
+	file        *os.File
+	closingOnce sync.Once // close has been called
+	closedOnce  sync.Once // fifo is closed
+	handle      *handle
+}
+
+var leakCheckWg *sync.WaitGroup
+
+// OpenFifo opens a fifo. Returns io.ReadWriteCloser.
+// Context can be used to cancel this function until open(2) has not returned.
+// Accepted flags:
+// - syscall.O_CREAT - create new fifo if one doesn't exist
+// - syscall.O_RDONLY - open fifo only from reader side
+// - syscall.O_WRONLY - open fifo only from writer side
+// - syscall.O_RDWR - open fifo from both sides, never block on syscall level
+// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the
+//     fifo isn't open. read/write will be connected after the actual fifo is
+//     open or after fifo is closed.
+func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) {
+	if _, err := os.Stat(fn); err != nil {
+		if os.IsNotExist(err) && flag&syscall.O_CREAT != 0 {
+			if err := mkfifo(fn, uint32(perm&os.ModePerm)); err != nil && !os.IsExist(err) {
+				return nil, errors.Wrapf(err, "error creating fifo %v", fn)
+			}
+		} else {
+			return nil, err
+		}
+	}
+
+	block := flag&syscall.O_NONBLOCK == 0 || flag&syscall.O_RDWR != 0
+
+	flag &= ^syscall.O_CREAT
+	flag &= ^syscall.O_NONBLOCK
+
+	h, err := getHandle(fn)
+	if err != nil {
+		return nil, err
+	}
+
+	f := &fifo{
+		handle:  h,
+		flag:    flag,
+		opened:  make(chan struct{}),
+		closed:  make(chan struct{}),
+		closing: make(chan struct{}),
+	}
+
+	wg := leakCheckWg
+	if wg != nil {
+		wg.Add(2)
+	}
+
+	go func() {
+		if wg != nil {
+			defer wg.Done()
+		}
+		select {
+		case <-ctx.Done():
+			select {
+			case <-f.opened:
+			default:
+				f.Close()
+			}
+		case <-f.opened:
+		case <-f.closed:
+		}
+	}()
+	go func() {
+		if wg != nil {
+			defer wg.Done()
+		}
+		var file *os.File
+		fn, err := h.Path()
+		if err == nil {
+			file, err = os.OpenFile(fn, flag, 0)
+		}
+		select {
+		case <-f.closing:
+			if err == nil {
+				select {
+				case <-ctx.Done():
+					err = ctx.Err()
+				default:
+					err = errors.Errorf("fifo %v was closed before opening", h.Name())
+				}
+				if file != nil {
+					file.Close()
+				}
+			}
+		default:
+		}
+		if err != nil {
+			f.closedOnce.Do(func() {
+				f.err = err
+				close(f.closed)
+			})
+			return
+		}
+		f.file = file
+		close(f.opened)
+	}()
+	if block {
+		select {
+		case <-f.opened:
+		case <-f.closed:
+			return nil, f.err
+		}
+	}
+	return f, nil
+}
+
+// Read from a fifo to a byte array.
+func (f *fifo) Read(b []byte) (int, error) {
+	if f.flag&syscall.O_WRONLY > 0 {
+		return 0, errors.New("reading from write-only fifo")
+	}
+	select {
+	case <-f.opened:
+		return f.file.Read(b)
+	default:
+	}
+	select {
+	case <-f.opened:
+		return f.file.Read(b)
+	case <-f.closed:
+		return 0, errors.New("reading from a closed fifo")
+	}
+}
+
+// Write from byte array to a fifo.
+func (f *fifo) Write(b []byte) (int, error) {
+	if f.flag&(syscall.O_WRONLY|syscall.O_RDWR) == 0 {
+		return 0, errors.New("writing to read-only fifo")
+	}
+	select {
+	case <-f.opened:
+		return f.file.Write(b)
+	default:
+	}
+	select {
+	case <-f.opened:
+		return f.file.Write(b)
+	case <-f.closed:
+		return 0, errors.New("writing to a closed fifo")
+	}
+}
+
+// Close the fifo. Next reads/writes will error. This method can also be used
+// before open(2) has returned and fifo was never opened.
+func (f *fifo) Close() (retErr error) {
+	for {
+		select {
+		case <-f.closed:
+			f.handle.Close()
+			return
+		default:
+			select {
+			case <-f.opened:
+				f.closedOnce.Do(func() {
+					retErr = f.file.Close()
+					f.err = retErr
+					close(f.closed)
+				})
+			default:
+				if f.flag&syscall.O_RDWR != 0 {
+					runtime.Gosched()
+					break
+				}
+				f.closingOnce.Do(func() {
+					close(f.closing)
+				})
+				reverseMode := syscall.O_WRONLY
+				if f.flag&syscall.O_WRONLY > 0 {
+					reverseMode = syscall.O_RDONLY
+				}
+				fn, err := f.handle.Path()
+				// if Close() is called concurrently(shouldn't) it may cause error
+				// because handle is closed
+				select {
+				case <-f.closed:
+				default:
+					if err != nil {
+						// Path has become invalid. We will leak a goroutine.
+						// This case should not happen in linux.
+						f.closedOnce.Do(func() {
+							f.err = err
+							close(f.closed)
+						})
+						<-f.closed
+						break
+					}
+					f, err := os.OpenFile(fn, reverseMode|syscall.O_NONBLOCK, 0)
+					if err == nil {
+						f.Close()
+					}
+					runtime.Gosched()
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/containerd/fifo/handle_linux.go b/vendor/github.com/containerd/fifo/handle_linux.go
new file mode 100644
index 0000000..8b9c663
--- /dev/null
+++ b/vendor/github.com/containerd/fifo/handle_linux.go
@@ -0,0 +1,81 @@
+// +build linux
+
+package fifo
+
+import (
+	"fmt"
+	"os"
+	"sync"
+	"syscall"
+
+	"github.com/pkg/errors"
+)
+
+const O_PATH = 010000000
+
+type handle struct {
+	f         *os.File
+	fd        uintptr
+	dev       uint64
+	ino       uint64
+	closeOnce sync.Once
+	name      string
+}
+
+func getHandle(fn string) (*handle, error) {
+	f, err := os.OpenFile(fn, O_PATH, 0)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to open %v with O_PATH", fn)
+	}
+
+	var (
+		stat syscall.Stat_t
+		fd   = f.Fd()
+	)
+	if err := syscall.Fstat(int(fd), &stat); err != nil {
+		f.Close()
+		return nil, errors.Wrapf(err, "failed to stat handle %v", fd)
+	}
+
+	h := &handle{
+		f:    f,
+		name: fn,
+		dev:  uint64(stat.Dev),
+		ino:  stat.Ino,
+		fd:   fd,
+	}
+
+	// check /proc just in case
+	if _, err := os.Stat(h.procPath()); err != nil {
+		f.Close()
+		return nil, errors.Wrapf(err, "couldn't stat %v", h.procPath())
+	}
+
+	return h, nil
+}
+
+func (h *handle) procPath() string {
+	return fmt.Sprintf("/proc/self/fd/%d", h.fd)
+}
+
+func (h *handle) Name() string {
+	return h.name
+}
+
+func (h *handle) Path() (string, error) {
+	var stat syscall.Stat_t
+	if err := syscall.Stat(h.procPath(), &stat); err != nil {
+		return "", errors.Wrapf(err, "path %v could not be statted", h.procPath())
+	}
+	if uint64(stat.Dev) != h.dev || stat.Ino != h.ino {
+		return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino)
+	}
+	return h.procPath(), nil
+}
+
+func (h *handle) Close() error {
+	h.closeOnce.Do(func() {
+		h.f.Close()
+	})
+	return nil
+}
diff --git a/vendor/github.com/containerd/fifo/handle_nolinux.go b/vendor/github.com/containerd/fifo/handle_nolinux.go
new file mode 100644
index 0000000..f0c8485
--- /dev/null
+++ b/vendor/github.com/containerd/fifo/handle_nolinux.go
@@ -0,0 +1,49 @@
+// +build !linux
+
+package fifo
+
+import (
+	"syscall"
+
+	"github.com/pkg/errors"
+)
+
+type handle struct {
+	fn  string
+	dev uint64
+	ino uint64
+}
+
+func getHandle(fn string) (*handle, error) {
+	var stat syscall.Stat_t
+	if err := syscall.Stat(fn, &stat); err != nil {
+		return nil, errors.Wrapf(err, "failed to stat %v", fn)
+	}
+
+	h := &handle{
+		fn:  fn,
+		dev: uint64(stat.Dev),
+		ino: uint64(stat.Ino),
+	}
+
+	return h, nil
+}
+
+func (h *handle) Path() (string, error) {
+	var stat syscall.Stat_t
+	if err := syscall.Stat(h.fn, &stat); err != nil {
+		return "", errors.Wrapf(err, "path %v could not be statted", h.fn)
+	}
+	if uint64(stat.Dev) != h.dev || uint64(stat.Ino) != h.ino {
+		return "", errors.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn)
+	}
+	return h.fn, nil
+}
+
+func (h *handle) Name() string {
+	return h.fn
+}
+
+func (h *handle) Close() error {
+	return nil
+}
diff --git a/vendor/github.com/containerd/fifo/mkfifo_nosolaris.go b/vendor/github.com/containerd/fifo/mkfifo_nosolaris.go
new file mode 100644
index 0000000..8c6ea45
--- /dev/null
+++ b/vendor/github.com/containerd/fifo/mkfifo_nosolaris.go
@@ -0,0 +1,9 @@
+// +build !solaris
+
+package fifo
+
+import "syscall"
+
+func mkfifo(path string, mode uint32) (err error) {
+	return syscall.Mkfifo(path, mode)
+}
diff --git a/vendor/github.com/containerd/fifo/mkfifo_solaris.go b/vendor/github.com/containerd/fifo/mkfifo_solaris.go
new file mode 100644
index 0000000..8d588a4
--- /dev/null
+++ b/vendor/github.com/containerd/fifo/mkfifo_solaris.go
@@ -0,0 +1,11 @@
+// +build solaris
+
+package fifo
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func mkfifo(path string, mode uint32) (err error) {
+	return unix.Mkfifo(path, mode)
+}
diff --git a/vendor/github.com/containerd/fifo/readme.md b/vendor/github.com/containerd/fifo/readme.md
new file mode 100644
index 0000000..2b41b3b
--- /dev/null
+++ b/vendor/github.com/containerd/fifo/readme.md
@@ -0,0 +1,32 @@
+### fifo
+
+[![Build Status](https://travis-ci.org/containerd/fifo.svg?branch=master)](https://travis-ci.org/containerd/fifo)
+
+Go package for handling fifos in a sane way.
+
+```
+// OpenFifo opens a fifo. Returns io.ReadWriteCloser.
+// Context can be used to cancel this function until open(2) has not returned.
+// Accepted flags:
+// - syscall.O_CREAT - create new fifo if one doesn't exist
+// - syscall.O_RDONLY - open fifo only from reader side
+// - syscall.O_WRONLY - open fifo only from writer side
+// - syscall.O_RDWR - open fifo from both sides, never block on syscall level
+// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the
+//     fifo isn't open. read/write will be connected after the actual fifo is
+//     open or after fifo is closed.
+func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error)
+
+
+// Read from a fifo to a byte array.
+func (f *fifo) Read(b []byte) (int, error)
+
+
+// Write from byte array to a fifo.
+func (f *fifo) Write(b []byte) (int, error)
+
+
+// Close the fifo. Next reads/writes will error. This method can also be used
+// before open(2) has returned and fifo was never opened.
+func (f *fifo) Close() error 
+```
diff --git a/vendor/github.com/containerd/go-runc/LICENSE b/vendor/github.com/containerd/go-runc/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containerd/go-runc/README.md b/vendor/github.com/containerd/go-runc/README.md
new file mode 100644
index 0000000..239601f
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/README.md
@@ -0,0 +1,14 @@
+# go-runc
+
+[![Build Status](https://travis-ci.org/containerd/go-runc.svg?branch=master)](https://travis-ci.org/containerd/go-runc)
+
+
+This is a package for consuming the [runc](https://github.com/opencontainers/runc) binary in your Go applications.
+It tries to expose all the settings and features of the runc CLI.  If there is something missing then add it, it's open source!
+
+This needs runc @ [a9610f2c0](https://github.com/opencontainers/runc/commit/a9610f2c0237d2636d05a031ec8659a70e75ffeb)
+or greater.
+
+## Docs
+
+Docs can be found at [godoc.org](https://godoc.org/github.com/containerd/go-runc).
diff --git a/vendor/github.com/containerd/go-runc/command_linux.go b/vendor/github.com/containerd/go-runc/command_linux.go
new file mode 100644
index 0000000..8b4a502
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/command_linux.go
@@ -0,0 +1,23 @@
+package runc
+
+import (
+	"context"
+	"os/exec"
+	"syscall"
+)
+
+func (r *Runc) command(context context.Context, args ...string) *exec.Cmd {
+	command := r.Command
+	if command == "" {
+		command = DefaultCommand
+	}
+	cmd := exec.CommandContext(context, command, append(r.args(), args...)...)
+	cmd.SysProcAttr = &syscall.SysProcAttr{
+		Setpgid: r.Setpgid,
+	}
+	if r.PdeathSignal != 0 {
+		cmd.SysProcAttr.Pdeathsig = r.PdeathSignal
+	}
+
+	return cmd
+}
diff --git a/vendor/github.com/containerd/go-runc/command_other.go b/vendor/github.com/containerd/go-runc/command_other.go
new file mode 100644
index 0000000..bf03a2f
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/command_other.go
@@ -0,0 +1,16 @@
+// +build !linux
+
+package runc
+
+import (
+	"context"
+	"os/exec"
+)
+
+func (r *Runc) command(context context.Context, args ...string) *exec.Cmd {
+	command := r.Command
+	if command == "" {
+		command = DefaultCommand
+	}
+	return exec.CommandContext(context, command, append(r.args(), args...)...)
+}
diff --git a/vendor/github.com/containerd/go-runc/console.go b/vendor/github.com/containerd/go-runc/console.go
new file mode 100644
index 0000000..141e64d
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/console.go
@@ -0,0 +1,141 @@
+package runc
+
+import (
+	"fmt"
+	"io/ioutil"
+	"net"
+	"os"
+	"path/filepath"
+
+	"github.com/containerd/console"
+	"golang.org/x/sys/unix"
+)
+
+// NewConsoleSocket creates a new unix socket at the provided path to accept a
+// pty master created by runc for use by the container
+func NewConsoleSocket(path string) (*Socket, error) {
+	abs, err := filepath.Abs(path)
+	if err != nil {
+		return nil, err
+	}
+	addr, err := net.ResolveUnixAddr("unix", abs)
+	if err != nil {
+		return nil, err
+	}
+	l, err := net.ListenUnix("unix", addr)
+	if err != nil {
+		return nil, err
+	}
+	return &Socket{
+		l:    l,
+	}, nil
+}
+
+// NewTempConsoleSocket returns a temp console socket for use with a container
+// On Close(), the socket is deleted
+func NewTempConsoleSocket() (*Socket, error) {
+	dir, err := ioutil.TempDir("", "pty")
+	if err != nil {
+		return nil, err
+	}
+	abs, err := filepath.Abs(filepath.Join(dir, "pty.sock"))
+	if err != nil {
+		return nil, err
+	}
+	addr, err := net.ResolveUnixAddr("unix", abs)
+	if err != nil {
+		return nil, err
+	}
+	l, err := net.ListenUnix("unix", addr)
+	if err != nil {
+		return nil, err
+	}
+	return &Socket{
+		l:     l,
+		rmdir: true,
+	}, nil
+}
+
+// Socket is a unix socket that accepts the pty master created by runc
+type Socket struct {
+	rmdir bool
+	l     *net.UnixListener
+}
+
+// Path returns the path to the unix socket on disk
+func (c *Socket) Path() string {
+	return c.l.Addr().String()
+}
+
+// recvFd waits for a file descriptor to be sent over the given AF_UNIX
+// socket. The file name of the remote file descriptor will be recreated
+// locally (it is sent as non-auxiliary data in the same payload).
+func recvFd(socket *net.UnixConn) (*os.File, error) {
+	const MaxNameLen = 4096
+	var oobSpace = unix.CmsgSpace(4)
+
+	name := make([]byte, MaxNameLen)
+	oob := make([]byte, oobSpace)
+
+	n, oobn, _, _, err := socket.ReadMsgUnix(name, oob)
+	if err != nil {
+		return nil, err
+	}
+
+	if n >= MaxNameLen || oobn != oobSpace {
+		return nil, fmt.Errorf("recvfd: incorrect number of bytes read (n=%d oobn=%d)", n, oobn)
+	}
+
+	// Truncate.
+	name = name[:n]
+	oob = oob[:oobn]
+
+	scms, err := unix.ParseSocketControlMessage(oob)
+	if err != nil {
+		return nil, err
+	}
+	if len(scms) != 1 {
+		return nil, fmt.Errorf("recvfd: number of SCMs is not 1: %d", len(scms))
+	}
+	scm := scms[0]
+
+	fds, err := unix.ParseUnixRights(&scm)
+	if err != nil {
+		return nil, err
+	}
+	if len(fds) != 1 {
+		return nil, fmt.Errorf("recvfd: number of fds is not 1: %d", len(fds))
+	}
+	fd := uintptr(fds[0])
+
+	return os.NewFile(fd, string(name)), nil
+}
+
+// ReceiveMaster blocks until the socket receives the pty master
+func (c *Socket) ReceiveMaster() (console.Console, error) {
+	conn, err := c.l.Accept()
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+	uc, ok := conn.(*net.UnixConn)
+	if !ok {
+		return nil, fmt.Errorf("received connection which was not a unix socket")
+	}
+	f, err := recvFd(uc)
+	if err != nil {
+		return nil, err
+	}
+	return console.ConsoleFromFile(f)
+}
+
+// Close closes the unix socket
+func (c *Socket) Close() error {
+	err := c.l.Close()
+	if c.rmdir {
+		if rerr := os.RemoveAll(filepath.Dir(c.Path())); err == nil {
+			err = rerr
+		}
+	}
+	return err
+}
diff --git a/vendor/github.com/containerd/go-runc/container.go b/vendor/github.com/containerd/go-runc/container.go
new file mode 100644
index 0000000..7981aac
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/container.go
@@ -0,0 +1,14 @@
+package runc
+
+import "time"
+
+// Container holds information for a runc container
+type Container struct {
+	ID          string            `json:"id"`
+	Pid         int               `json:"pid"`
+	Status      string            `json:"status"`
+	Bundle      string            `json:"bundle"`
+	Rootfs      string            `json:"rootfs"`
+	Created     time.Time         `json:"created"`
+	Annotations map[string]string `json:"annotations"`
+}
diff --git a/vendor/github.com/containerd/go-runc/events.go b/vendor/github.com/containerd/go-runc/events.go
new file mode 100644
index 0000000..ca0e048
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/events.go
@@ -0,0 +1,84 @@
+package runc
+
+type Event struct {
+	// Type is the event type generated by runc
+	// If the type is "error" then check the Err field on the event for
+	// the actual error
+	Type  string `json:"type"`
+	ID    string `json:"id"`
+	Stats *Stats `json:"data,omitempty"`
+	// Err has a read error if we were unable to decode the event from runc
+	Err error `json:"-"`
+}
+
+type Stats struct {
+	Cpu     Cpu                `json:"cpu"`
+	Memory  Memory             `json:"memory"`
+	Pids    Pids               `json:"pids"`
+	Blkio   Blkio              `json:"blkio"`
+	Hugetlb map[string]Hugetlb `json:"hugetlb"`
+}
+
+type Hugetlb struct {
+	Usage   uint64 `json:"usage,omitempty"`
+	Max     uint64 `json:"max,omitempty"`
+	Failcnt uint64 `json:"failcnt"`
+}
+
+type BlkioEntry struct {
+	Major uint64 `json:"major,omitempty"`
+	Minor uint64 `json:"minor,omitempty"`
+	Op    string `json:"op,omitempty"`
+	Value uint64 `json:"value,omitempty"`
+}
+
+type Blkio struct {
+	IoServiceBytesRecursive []BlkioEntry `json:"ioServiceBytesRecursive,omitempty"`
+	IoServicedRecursive     []BlkioEntry `json:"ioServicedRecursive,omitempty"`
+	IoQueuedRecursive       []BlkioEntry `json:"ioQueueRecursive,omitempty"`
+	IoServiceTimeRecursive  []BlkioEntry `json:"ioServiceTimeRecursive,omitempty"`
+	IoWaitTimeRecursive     []BlkioEntry `json:"ioWaitTimeRecursive,omitempty"`
+	IoMergedRecursive       []BlkioEntry `json:"ioMergedRecursive,omitempty"`
+	IoTimeRecursive         []BlkioEntry `json:"ioTimeRecursive,omitempty"`
+	SectorsRecursive        []BlkioEntry `json:"sectorsRecursive,omitempty"`
+}
+
+type Pids struct {
+	Current uint64 `json:"current,omitempty"`
+	Limit   uint64 `json:"limit,omitempty"`
+}
+
+type Throttling struct {
+	Periods          uint64 `json:"periods,omitempty"`
+	ThrottledPeriods uint64 `json:"throttledPeriods,omitempty"`
+	ThrottledTime    uint64 `json:"throttledTime,omitempty"`
+}
+
+type CpuUsage struct {
+	// Units: nanoseconds.
+	Total  uint64   `json:"total,omitempty"`
+	Percpu []uint64 `json:"percpu,omitempty"`
+	Kernel uint64   `json:"kernel"`
+	User   uint64   `json:"user"`
+}
+
+type Cpu struct {
+	Usage      CpuUsage   `json:"usage,omitempty"`
+	Throttling Throttling `json:"throttling,omitempty"`
+}
+
+type MemoryEntry struct {
+	Limit   uint64 `json:"limit"`
+	Usage   uint64 `json:"usage,omitempty"`
+	Max     uint64 `json:"max,omitempty"`
+	Failcnt uint64 `json:"failcnt"`
+}
+
+type Memory struct {
+	Cache     uint64            `json:"cache,omitempty"`
+	Usage     MemoryEntry       `json:"usage,omitempty"`
+	Swap      MemoryEntry       `json:"swap,omitempty"`
+	Kernel    MemoryEntry       `json:"kernel,omitempty"`
+	KernelTCP MemoryEntry       `json:"kernelTCP,omitempty"`
+	Raw       map[string]uint64 `json:"raw,omitempty"`
+}
diff --git a/vendor/github.com/containerd/go-runc/io.go b/vendor/github.com/containerd/go-runc/io.go
new file mode 100644
index 0000000..0c3f1a9
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/io.go
@@ -0,0 +1,213 @@
+package runc
+
+import (
+	"io"
+	"os"
+	"os/exec"
+
+	"github.com/pkg/errors"
+	"golang.org/x/sys/unix"
+)
+
+type IO interface {
+	io.Closer
+	Stdin() io.WriteCloser
+	Stdout() io.ReadCloser
+	Stderr() io.ReadCloser
+	Set(*exec.Cmd)
+}
+
+type StartCloser interface {
+	CloseAfterStart() error
+}
+
+// NewPipeIO creates pipe pairs to be used with runc
+func NewPipeIO(uid, gid int) (i IO, err error) {
+	var pipes []*pipe
+	// cleanup in case of an error
+	defer func() {
+		if err != nil {
+			for _, p := range pipes {
+				p.Close()
+			}
+		}
+	}()
+	stdin, err := newPipe()
+	if err != nil {
+		return nil, err
+	}
+	pipes = append(pipes, stdin)
+	if err = unix.Fchown(int(stdin.r.Fd()), uid, gid); err != nil {
+		return nil, errors.Wrap(err, "failed to chown stdin")
+	}
+
+	stdout, err := newPipe()
+	if err != nil {
+		return nil, err
+	}
+	pipes = append(pipes, stdout)
+	if err = unix.Fchown(int(stdout.w.Fd()), uid, gid); err != nil {
+		return nil, errors.Wrap(err, "failed to chown stdout")
+	}
+
+	stderr, err := newPipe()
+	if err != nil {
+		return nil, err
+	}
+	pipes = append(pipes, stderr)
+	if err = unix.Fchown(int(stderr.w.Fd()), uid, gid); err != nil {
+		return nil, errors.Wrap(err, "failed to chown stderr")
+	}
+
+	return &pipeIO{
+		in:  stdin,
+		out: stdout,
+		err: stderr,
+	}, nil
+}
+
+func newPipe() (*pipe, error) {
+	r, w, err := os.Pipe()
+	if err != nil {
+		return nil, err
+	}
+	return &pipe{
+		r: r,
+		w: w,
+	}, nil
+}
+
+type pipe struct {
+	r *os.File
+	w *os.File
+}
+
+func (p *pipe) Close() error {
+	err := p.r.Close()
+	if werr := p.w.Close(); err == nil {
+		err = werr
+	}
+	return err
+}
+
+type pipeIO struct {
+	in  *pipe
+	out *pipe
+	err *pipe
+}
+
+func (i *pipeIO) Stdin() io.WriteCloser {
+	return i.in.w
+}
+
+func (i *pipeIO) Stdout() io.ReadCloser {
+	return i.out.r
+}
+
+func (i *pipeIO) Stderr() io.ReadCloser {
+	return i.err.r
+}
+
+func (i *pipeIO) Close() error {
+	var err error
+	for _, v := range []*pipe{
+		i.in,
+		i.out,
+		i.err,
+	} {
+		if cerr := v.Close(); err == nil {
+			err = cerr
+		}
+	}
+	return err
+}
+
+func (i *pipeIO) CloseAfterStart() error {
+	for _, f := range []*os.File{
+		i.out.w,
+		i.err.w,
+	} {
+		f.Close()
+	}
+	return nil
+}
+
+// Set sets the io to the exec.Cmd
+func (i *pipeIO) Set(cmd *exec.Cmd) {
+	cmd.Stdin = i.in.r
+	cmd.Stdout = i.out.w
+	cmd.Stderr = i.err.w
+}
+
+func NewSTDIO() (IO, error) {
+	return &stdio{}, nil
+}
+
+type stdio struct {
+}
+
+func (s *stdio) Close() error {
+	return nil
+}
+
+func (s *stdio) Set(cmd *exec.Cmd) {
+	cmd.Stdin = os.Stdin
+	cmd.Stdout = os.Stdout
+	cmd.Stderr = os.Stderr
+}
+
+func (s *stdio) Stdin() io.WriteCloser {
+	return os.Stdin
+}
+
+func (s *stdio) Stdout() io.ReadCloser {
+	return os.Stdout
+}
+
+func (s *stdio) Stderr() io.ReadCloser {
+	return os.Stderr
+}
+
+// NewNullIO returns IO setup for /dev/null use with runc
+func NewNullIO() (IO, error) {
+	f, err := os.Open(os.DevNull)
+	if err != nil {
+		return nil, err
+	}
+	return &nullIO{
+		devNull: f,
+	}, nil
+}
+
+type nullIO struct {
+	devNull *os.File
+}
+
+func (n *nullIO) Close() error {
+	// this should be closed after start but if not
+	// make sure we close the file but don't return the error
+	n.devNull.Close()
+	return nil
+}
+
+func (n *nullIO) Stdin() io.WriteCloser {
+	return nil
+}
+
+func (n *nullIO) Stdout() io.ReadCloser {
+	return nil
+}
+
+func (n *nullIO) Stderr() io.ReadCloser {
+	return nil
+}
+
+func (n *nullIO) Set(c *exec.Cmd) {
+	// don't set STDIN here
+	c.Stdout = n.devNull
+	c.Stderr = n.devNull
+}
+
+func (n *nullIO) CloseAfterStart() error {
+	return n.devNull.Close()
+}
diff --git a/vendor/github.com/containerd/go-runc/monitor.go b/vendor/github.com/containerd/go-runc/monitor.go
new file mode 100644
index 0000000..2d62c5a
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/monitor.go
@@ -0,0 +1,60 @@
+package runc
+
+import (
+	"os/exec"
+	"syscall"
+	"time"
+)
+
+var Monitor ProcessMonitor = &defaultMonitor{}
+
+type Exit struct {
+	Timestamp time.Time
+	Pid       int
+	Status    int
+}
+
+// ProcessMonitor is an interface for process monitoring
+//
+// It allows daemons using go-runc to have a SIGCHLD handler
+// to handle exits without introducing races between the handler
+// and go's exec.Cmd
+// These methods should match the methods exposed by exec.Cmd to provide
+// a consistent experience for the caller
+type ProcessMonitor interface {
+	Start(*exec.Cmd) (chan Exit, error)
+	Wait(*exec.Cmd, chan Exit) (int, error)
+}
+
+type defaultMonitor struct {
+}
+
+func (m *defaultMonitor) Start(c *exec.Cmd) (chan Exit, error) {
+	if err := c.Start(); err != nil {
+		return nil, err
+	}
+	ec := make(chan Exit, 1)
+	go func() {
+		var status int
+		if err := c.Wait(); err != nil {
+			status = 255
+			if exitErr, ok := err.(*exec.ExitError); ok {
+				if ws, ok := exitErr.Sys().(syscall.WaitStatus); ok {
+					status = ws.ExitStatus()
+				}
+			}
+		}
+		ec <- Exit{
+			Timestamp: time.Now(),
+			Pid:       c.Process.Pid,
+			Status:    status,
+		}
+		close(ec)
+	}()
+	return ec, nil
+}
+
+func (m *defaultMonitor) Wait(c *exec.Cmd, ec chan Exit) (int, error) {
+	e := <-ec
+	return e.Status, nil
+}
diff --git a/vendor/github.com/containerd/go-runc/runc.go b/vendor/github.com/containerd/go-runc/runc.go
new file mode 100644
index 0000000..c5a66a1
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/runc.go
@@ -0,0 +1,658 @@
+package runc
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"syscall"
+	"time"
+
+	specs "github.com/opencontainers/runtime-spec/specs-go"
+)
+
+// Format is the type of log formatting options available
+type Format string
+
+const (
+	none Format = ""
+	JSON Format = "json"
+	Text Format = "text"
+	// DefaultCommand is the default command for Runc
+	DefaultCommand = "runc"
+)
+
+// Runc is the client to the runc cli
+type Runc struct {
+	//If command is empty, DefaultCommand is used
+	Command       string
+	Root          string
+	Debug         bool
+	Log           string
+	LogFormat     Format
+	PdeathSignal  syscall.Signal
+	Setpgid       bool
+	Criu          string
+	SystemdCgroup bool
+}
+
+// List returns all containers created inside the provided runc root directory
+func (r *Runc) List(context context.Context) ([]*Container, error) {
+	data, err := cmdOutput(r.command(context, "list", "--format=json"), false)
+	if err != nil {
+		return nil, err
+	}
+	var out []*Container
+	if err := json.Unmarshal(data, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// State returns the state for the container provided by id
+func (r *Runc) State(context context.Context, id string) (*Container, error) {
+	data, err := cmdOutput(r.command(context, "state", id), true)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %s", err, data)
+	}
+	var c Container
+	if err := json.Unmarshal(data, &c); err != nil {
+		return nil, err
+	}
+	return &c, nil
+}
+
+type ConsoleSocket interface {
+	Path() string
+}
+
+type CreateOpts struct {
+	IO
+	// PidFile is a path to where a pid file should be created
+	PidFile       string
+	ConsoleSocket ConsoleSocket
+	Detach        bool
+	NoPivot       bool
+	NoNewKeyring  bool
+	ExtraFiles    []*os.File
+}
+
+func (o *CreateOpts) args() (out []string, err error) {
+	if o.PidFile != "" {
+		abs, err := filepath.Abs(o.PidFile)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, "--pid-file", abs)
+	}
+	if o.ConsoleSocket != nil {
+		out = append(out, "--console-socket", o.ConsoleSocket.Path())
+	}
+	if o.NoPivot {
+		out = append(out, "--no-pivot")
+	}
+	if o.NoNewKeyring {
+		out = append(out, "--no-new-keyring")
+	}
+	if o.Detach {
+		out = append(out, "--detach")
+	}
+	if o.ExtraFiles != nil {
+		out = append(out, "--preserve-fds", strconv.Itoa(len(o.ExtraFiles)))
+	}
+	return out, nil
+}
+
+// Create creates a new container and returns its pid if it was created successfully
+func (r *Runc) Create(context context.Context, id, bundle string, opts *CreateOpts) error {
+	args := []string{"create", "--bundle", bundle}
+	if opts != nil {
+		oargs, err := opts.args()
+		if err != nil {
+			return err
+		}
+		args = append(args, oargs...)
+	}
+	cmd := r.command(context, append(args, id)...)
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	cmd.ExtraFiles = opts.ExtraFiles
+
+	if cmd.Stdout == nil && cmd.Stderr == nil {
+		data, err := cmdOutput(cmd, true)
+		if err != nil {
+			return fmt.Errorf("%s: %s", err, data)
+		}
+		return nil
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return err
+	}
+	if opts != nil && opts.IO != nil {
+		if c, ok := opts.IO.(StartCloser); ok {
+			if err := c.CloseAfterStart(); err != nil {
+				return err
+			}
+		}
+	}
+	status, err := Monitor.Wait(cmd, ec)
+	if err == nil && status != 0 {
+		err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0])
+	}
+	return err
+}
+
+// Start will start an already created container
+func (r *Runc) Start(context context.Context, id string) error {
+	return r.runOrError(r.command(context, "start", id))
+}
+
+type ExecOpts struct {
+	IO
+	PidFile       string
+	ConsoleSocket ConsoleSocket
+	Detach        bool
+}
+
+func (o *ExecOpts) args() (out []string, err error) {
+	if o.ConsoleSocket != nil {
+		out = append(out, "--console-socket", o.ConsoleSocket.Path())
+	}
+	if o.Detach {
+		out = append(out, "--detach")
+	}
+	if o.PidFile != "" {
+		abs, err := filepath.Abs(o.PidFile)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, "--pid-file", abs)
+	}
+	return out, nil
+}
+
+// Exec executes an additional process inside the container based on a full
+// OCI Process specification
+func (r *Runc) Exec(context context.Context, id string, spec specs.Process, opts *ExecOpts) error {
+	f, err := ioutil.TempFile("", "runc-process")
+	if err != nil {
+		return err
+	}
+	defer os.Remove(f.Name())
+	err = json.NewEncoder(f).Encode(spec)
+	f.Close()
+	if err != nil {
+		return err
+	}
+	args := []string{"exec", "--process", f.Name()}
+	if opts != nil {
+		oargs, err := opts.args()
+		if err != nil {
+			return err
+		}
+		args = append(args, oargs...)
+	}
+	cmd := r.command(context, append(args, id)...)
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	if cmd.Stdout == nil && cmd.Stderr == nil {
+		data, err := cmdOutput(cmd, true)
+		if err != nil {
+			return fmt.Errorf("%s: %s", err, data)
+		}
+		return nil
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return err
+	}
+	if opts != nil && opts.IO != nil {
+		if c, ok := opts.IO.(StartCloser); ok {
+			if err := c.CloseAfterStart(); err != nil {
+				return err
+			}
+		}
+	}
+	status, err := Monitor.Wait(cmd, ec)
+	if err == nil && status != 0 {
+		err = fmt.Errorf("%s did not terminate sucessfully", cmd.Args[0])
+	}
+	return err
+}
+
+// Run runs the create, start, delete lifecycle of the container
+// and returns its exit status after it has exited.
+func (r *Runc) Run(context context.Context, id, bundle string, opts *CreateOpts) (int, error) {
+	args := []string{"run", "--bundle", bundle}
+	if opts != nil {
+		extra, err := opts.args()
+		if err != nil {
+			return -1, err
+		}
+		args = append(args, extra...)
+	}
+	args = append(args, id)
+	cmd := r.command(context, args...)
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return -1, err
+	}
+	return Monitor.Wait(cmd, ec)
+}
+
+// DeleteOpts holds the optional settings for Runc.Delete.
+type DeleteOpts struct {
+	// Force maps to runc's --force flag.
+	Force bool
+}
+
+// args converts the options into runc command line flags.
+func (o *DeleteOpts) args() (out []string) {
+	if o.Force {
+		out = append(out, "--force")
+	}
+	return out
+}
+
+// Delete deletes the container.
+func (r *Runc) Delete(context context.Context, id string, opts *DeleteOpts) error {
+	args := []string{"delete"}
+	if opts != nil {
+		args = append(args, opts.args()...)
+	}
+	args = append(args, id)
+	return r.runOrError(r.command(context, args...))
+}
+
+// KillOpts specifies options for killing a container and its processes
+type KillOpts struct {
+	// All maps to runc's --all flag (signal all processes in the container).
+	All bool
+}
+
+// args converts the options into runc command line flags.
+func (o *KillOpts) args() (out []string) {
+	if o.All {
+		out = append(out, "--all")
+	}
+	return out
+}
+
+// Kill sends the specified signal to the container.
+func (r *Runc) Kill(context context.Context, id string, sig int, opts *KillOpts) error {
+	args := []string{"kill"}
+	if opts != nil {
+		args = append(args, opts.args()...)
+	}
+	args = append(args, id, strconv.Itoa(sig))
+	return r.runOrError(r.command(context, args...))
+}
+
+// Stats return the stats for a container like cpu, memory, and io
+func (r *Runc) Stats(context context.Context, id string) (*Stats, error) {
+	// one-shot invocation of `runc events --stats`; only the first JSON
+	// event on stdout is decoded
+	cmd := r.command(context, "events", "--stats", id)
+	rd, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return nil, err
+	}
+	// close the pipe and reap the child regardless of decode outcome
+	defer func() {
+		rd.Close()
+		Monitor.Wait(cmd, ec)
+	}()
+	var e Event
+	if err := json.NewDecoder(rd).Decode(&e); err != nil {
+		return nil, err
+	}
+	return e.Stats, nil
+}
+
+// Events returns an event stream from runc for a container with stats and OOM notifications
+func (r *Runc) Events(context context.Context, id string, interval time.Duration) (chan *Event, error) {
+	cmd := r.command(context, "events", fmt.Sprintf("--interval=%ds", int(interval.Seconds())), id)
+	rd, err := cmd.StdoutPipe()
+	if err != nil {
+		return nil, err
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		rd.Close()
+		return nil, err
+	}
+	var (
+		dec = json.NewDecoder(rd)
+		// buffered so a slow consumer does not immediately block decoding
+		c   = make(chan *Event, 128)
+	)
+	go func() {
+		// the channel is closed and the child reaped when runc's stdout
+		// reaches EOF, i.e. when the events process exits
+		defer func() {
+			close(c)
+			rd.Close()
+			Monitor.Wait(cmd, ec)
+		}()
+		for {
+			var e Event
+			if err := dec.Decode(&e); err != nil {
+				if err == io.EOF {
+					return
+				}
+				// NOTE(review): a non-EOF decode error is forwarded as an
+				// "error" event but the loop continues; confirm the decoder
+				// can make progress afterwards, otherwise this can spin
+				e = Event{
+					Type: "error",
+					Err:  err,
+				}
+			}
+			c <- &e
+		}
+	}()
+	return c, nil
+}
+
+// Pause the container with the provided id.
+// Equivalent to invoking `runc pause <id>`.
+func (r *Runc) Pause(context context.Context, id string) error {
+	return r.runOrError(r.command(context, "pause", id))
+}
+
+// Resume the container with the provided id.
+// Equivalent to invoking `runc resume <id>`.
+func (r *Runc) Resume(context context.Context, id string) error {
+	return r.runOrError(r.command(context, "resume", id))
+}
+
+// Ps lists all the processes inside the container returning their pids.
+func (r *Runc) Ps(context context.Context, id string) ([]int, error) {
+	cmd := r.command(context, "ps", "--format", "json", id)
+	data, err := cmdOutput(cmd, true)
+	if err != nil {
+		return nil, fmt.Errorf("%s: %s", err, data)
+	}
+	var pids []int
+	if jerr := json.Unmarshal(data, &pids); jerr != nil {
+		return nil, jerr
+	}
+	return pids, nil
+}
+
+// CheckpointOpts holds the options for checkpointing a container with criu.
+type CheckpointOpts struct {
+	// ImagePath is the path for saving the criu image file
+	ImagePath string
+	// WorkDir is the working directory for criu
+	WorkDir string
+	// ParentPath is the path for previous image files from a pre-dump
+	ParentPath string
+	// AllowOpenTCP allows open tcp connections to be checkpointed
+	AllowOpenTCP bool
+	// AllowExternalUnixSockets allows external unix sockets to be checkpointed
+	AllowExternalUnixSockets bool
+	// AllowTerminal allows the terminal(pty) to be checkpointed with a container
+	AllowTerminal bool
+	// CriuPageServer is the address:port for the criu page server
+	CriuPageServer string
+	// FileLocks handle file locks held by the container
+	FileLocks bool
+	// Cgroups is the cgroup mode for how to handle the checkpoint of a container's cgroups
+	Cgroups CgroupMode
+	// EmptyNamespaces creates a namespace for the container but does not save its properties
+	// Provide the namespaces you wish to be checkpointed without their settings on restore
+	EmptyNamespaces []string
+}
+
+// CgroupMode selects how a container's cgroups are handled during
+// checkpoint/restore; it is passed to criu as --manage-cgroups-mode.
+type CgroupMode string
+
+const (
+	// Values accepted by criu's manage-cgroups-mode option; see the criu
+	// documentation for their exact semantics.
+	Soft   CgroupMode = "soft"
+	Full   CgroupMode = "full"
+	Strict CgroupMode = "strict"
+)
+
+// args converts the checkpoint options into their runc command line flags.
+func (o *CheckpointOpts) args() (out []string) {
+	// appendIf appends the given flags only when cond holds
+	appendIf := func(cond bool, flags ...string) {
+		if cond {
+			out = append(out, flags...)
+		}
+	}
+	appendIf(o.ImagePath != "", "--image-path", o.ImagePath)
+	appendIf(o.WorkDir != "", "--work-path", o.WorkDir)
+	appendIf(o.ParentPath != "", "--parent-path", o.ParentPath)
+	appendIf(o.AllowOpenTCP, "--tcp-established")
+	appendIf(o.AllowExternalUnixSockets, "--ext-unix-sk")
+	appendIf(o.AllowTerminal, "--shell-job")
+	appendIf(o.CriuPageServer != "", "--page-server", o.CriuPageServer)
+	appendIf(o.FileLocks, "--file-locks")
+	appendIf(o.Cgroups != "", "--manage-cgroups-mode", string(o.Cgroups))
+	for _, ns := range o.EmptyNamespaces {
+		out = append(out, "--empty-ns", ns)
+	}
+	return out
+}
+
+// CheckpointAction is a modifier applied to the checkpoint command line,
+// e.g. LeaveRunning or PreDump.
+type CheckpointAction func([]string) []string
+
+// LeaveRunning keeps the container running after the checkpoint has been completed
+func LeaveRunning(args []string) []string {
+	return append(args, "--leave-running")
+}
+
+// PreDump allows a pre-dump of the checkpoint to be made and completed later
+func PreDump(args []string) []string {
+	return append(args, "--pre-dump")
+}
+
+// Checkpoint allows you to checkpoint a container using criu.
+func (r *Runc) Checkpoint(context context.Context, id string, opts *CheckpointOpts, actions ...CheckpointAction) error {
+	args := []string{"checkpoint"}
+	if opts != nil {
+		args = append(args, opts.args()...)
+	}
+	for _, apply := range actions {
+		args = apply(args)
+	}
+	args = append(args, id)
+	return r.runOrError(r.command(context, args...))
+}
+
+// RestoreOpts holds the optional settings for Runc.Restore, combining the
+// criu checkpoint options with restore-specific flags.
+type RestoreOpts struct {
+	CheckpointOpts
+	IO
+
+	// Detach maps to --detach.
+	Detach      bool
+	// PidFile is made absolute and passed as --pid-file.
+	PidFile     string
+	// NoSubreaper maps to the no-subreaper flag.
+	NoSubreaper bool
+	// NoPivot maps to --no-pivot.
+	NoPivot     bool
+}
+
+// args converts the restore options into runc command line flags.
+func (o *RestoreOpts) args() ([]string, error) {
+	out := o.CheckpointOpts.args()
+	if o.Detach {
+		out = append(out, "--detach")
+	}
+	if o.PidFile != "" {
+		abs, err := filepath.Abs(o.PidFile)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, "--pid-file", abs)
+	}
+	if o.NoPivot {
+		out = append(out, "--no-pivot")
+	}
+	if o.NoSubreaper {
+		// use the canonical double-dash form for consistency with every
+		// other flag emitted here (runc's CLI accepts both spellings)
+		out = append(out, "--no-subreaper")
+	}
+	return out, nil
+}
+
+// Restore restores a container with the provided id from an existing checkpoint.
+func (r *Runc) Restore(context context.Context, id, bundle string, opts *RestoreOpts) (int, error) {
+	args := []string{"restore"}
+	if opts != nil {
+		oargs, err := opts.args()
+		if err != nil {
+			return -1, err
+		}
+		args = append(args, oargs...)
+	}
+	args = append(args, "--bundle", bundle)
+	cmd := r.command(context, append(args, id)...)
+	if opts != nil && opts.IO != nil {
+		opts.Set(cmd)
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return -1, err
+	}
+	if opts != nil && opts.IO != nil {
+		if c, ok := opts.IO.(StartCloser); ok {
+			// release parent-side pipe ends once the child has started
+			if err := c.CloseAfterStart(); err != nil {
+				return -1, err
+			}
+		}
+	}
+	// the int result is the restored process's exit status
+	return Monitor.Wait(cmd, ec)
+}
+
+// Update updates the current container with the provided resource spec
+func (r *Runc) Update(context context.Context, id string, resources *specs.LinuxResources) error {
+	buf := bytes.NewBuffer(nil)
+	if err := json.NewEncoder(buf).Encode(resources); err != nil {
+		return err
+	}
+	args := []string{"update", "--resources", "-", id}
+	cmd := r.command(context, args...)
+	cmd.Stdin = buf
+	return r.runOrError(cmd)
+}
+
+// ErrParseRuncVersion is returned when the output of `runc --version`
+// does not have the expected three-line shape.
+var ErrParseRuncVersion = errors.New("unable to parse runc version")
+
+// Version holds the version strings reported by the runc binary.
+type Version struct {
+	// Runc is the version of the runc binary itself.
+	Runc   string
+	// Commit identifies the commit the binary was built from.
+	Commit string
+	// Spec is the OCI runtime-spec version the binary implements.
+	Spec   string
+}
+
+// Version returns the runc and runtime-spec versions.
+func (r *Runc) Version(context context.Context) (Version, error) {
+	out, err := cmdOutput(r.command(context, "--version"), false)
+	if err != nil {
+		return Version{}, err
+	}
+	return parseVersion(out)
+}
+
+// parseVersion parses the three-line `runc --version` output into a
+// Version. Each line is split once on a per-line separator and the
+// second half is kept.
+func parseVersion(data []byte) (Version, error) {
+	var v Version
+	lines := strings.Split(strings.TrimSpace(string(data)), "\n")
+	if len(lines) != 3 {
+		return v, ErrParseRuncVersion
+	}
+
+	fields := []struct {
+		dest *string
+		sep  string
+	}{
+		{&v.Runc, "version "},
+		{&v.Commit, ": "},
+		{&v.Spec, ": "},
+	}
+	for i, f := range fields {
+		halves := strings.Split(lines[i], f.sep)
+		if len(halves) != 2 {
+			return v, fmt.Errorf("unable to parse version line %q", lines[i])
+		}
+		*f.dest = halves[1]
+	}
+	return v, nil
+}
+
+// args returns the global runc flags derived from the Runc struct's
+// configuration fields.
+// NOTE(review): presumably r.command places these before the subcommand —
+// confirm against the command helper, which is defined elsewhere.
+func (r *Runc) args() (out []string) {
+	if r.Root != "" {
+		out = append(out, "--root", r.Root)
+	}
+	if r.Debug {
+		out = append(out, "--debug")
+	}
+	if r.Log != "" {
+		out = append(out, "--log", r.Log)
+	}
+	if r.LogFormat != none {
+		out = append(out, "--log-format", string(r.LogFormat))
+	}
+	if r.Criu != "" {
+		out = append(out, "--criu", r.Criu)
+	}
+	if r.SystemdCgroup {
+		out = append(out, "--systemd-cgroup")
+	}
+	return out
+}
+
+// runOrError will run the provided command.  If an error is
+// encountered and neither Stdout nor Stderr was set the error and the
+// stderr of the command will be returned in the format of <error>:
+// <stderr>
+func (r *Runc) runOrError(cmd *exec.Cmd) error {
+	if cmd.Stdout != nil || cmd.Stderr != nil {
+		// caller attached its own stdio; just run and report exit status
+		ec, err := Monitor.Start(cmd)
+		if err != nil {
+			return err
+		}
+		status, err := Monitor.Wait(cmd, ec)
+		if err == nil && status != 0 {
+			err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+		}
+		return err
+	}
+	// otherwise capture combined output and fold it into the error
+	data, err := cmdOutput(cmd, true)
+	if err != nil {
+		return fmt.Errorf("%s: %s", err, data)
+	}
+	return nil
+}
+
+// cmdOutput runs cmd under the process Monitor and returns its stdout
+// (plus stderr when combined is true). A non-zero exit status is turned
+// into an error; the captured output is returned either way.
+func cmdOutput(cmd *exec.Cmd, combined bool) ([]byte, error) {
+	var b bytes.Buffer
+
+	cmd.Stdout = &b
+	if combined {
+		cmd.Stderr = &b
+	}
+	ec, err := Monitor.Start(cmd)
+	if err != nil {
+		return nil, err
+	}
+
+	status, err := Monitor.Wait(cmd, ec)
+	if err == nil && status != 0 {
+		err = fmt.Errorf("%s did not terminate successfully", cmd.Args[0])
+	}
+
+	return b.Bytes(), err
+}
diff --git a/vendor/github.com/containerd/go-runc/utils.go b/vendor/github.com/containerd/go-runc/utils.go
new file mode 100644
index 0000000..81fcd3f
--- /dev/null
+++ b/vendor/github.com/containerd/go-runc/utils.go
@@ -0,0 +1,28 @@
+package runc
+
+import (
+	"io/ioutil"
+	"strconv"
+	"syscall"
+)
+
+// ReadPidFile reads the pid file at the provided path and returns
+// the pid or an error if the read and conversion is unsuccessful
+// NOTE(review): the file contents are passed to strconv.Atoi verbatim, so
+// a trailing newline in the pid file will cause an error — confirm that
+// all writers of these files omit it.
+func ReadPidFile(path string) (int, error) {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return -1, err
+	}
+	return strconv.Atoi(string(data))
+}
+
+// exitSignalOffset is added to a terminating signal number to produce the
+// conventional shell-style exit status (e.g. 128+9 for SIGKILL).
+const exitSignalOffset = 128
+
+// exitStatus returns the correct exit status for a process based on if it
+// was signaled or exited cleanly.
+func exitStatus(status syscall.WaitStatus) int {
+	if !status.Signaled() {
+		return status.ExitStatus()
+	}
+	return exitSignalOffset + int(status.Signal())
+}
diff --git a/vendor/github.com/containerd/typeurl/LICENSE b/vendor/github.com/containerd/typeurl/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/containerd/typeurl/README.md b/vendor/github.com/containerd/typeurl/README.md
new file mode 100644
index 0000000..e078774
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/README.md
@@ -0,0 +1,9 @@
+# typeurl
+
+[![Build Status](https://travis-ci.org/containerd/typeurl.svg?branch=master)](https://travis-ci.org/containerd/typeurl)
+
+[![codecov](https://codecov.io/gh/containerd/typeurl/branch/master/graph/badge.svg)](https://codecov.io/gh/containerd/typeurl)
+
+A Go package for managing the registration, marshaling, and unmarshaling of encoded types.
+
+This package helps when types are sent over a gRPC API and marshaled as a [protobuf.Any](https://developers.google.com/protocol-buffers/docs/proto3#any).
diff --git a/vendor/github.com/containerd/typeurl/types.go b/vendor/github.com/containerd/typeurl/types.go
new file mode 100644
index 0000000..10a7822
--- /dev/null
+++ b/vendor/github.com/containerd/typeurl/types.go
@@ -0,0 +1,142 @@
+package typeurl
+
+import (
+	"encoding/json"
+	"path"
+	"reflect"
+	"sync"
+
+	"github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/types"
+	"github.com/pkg/errors"
+)
+
+var (
+	// mu guards registry.
+	mu       sync.Mutex
+	// registry maps a registered concrete type to its type url path.
+	registry = make(map[reflect.Type]string)
+)
+
+// ErrNotFound is returned when a type or type url has not been registered.
+var ErrNotFound = errors.New("not found")
+
+// Register associates the concrete type of v (which must be a pointer)
+// with the type url built by joining args. Re-registering the same type
+// with the same path is a no-op; a different path panics.
+func Register(v interface{}, args ...string) {
+	var (
+		t = tryDereference(v)
+		p = path.Join(args...)
+	)
+	mu.Lock()
+	defer mu.Unlock()
+	if et, ok := registry[t]; ok {
+		if et != p {
+			panic(errors.Errorf("type registered with alternate path %q != %q", et, p))
+		}
+		return
+	}
+	registry[t] = p
+}
+
+// TypeURL returns the type url for a registered type
+func (func is read below) — for unregistered proto messages the proto
+
+// Is returns true if the type of the Any is the same as v
+// Note: v must be a pointer; tryDereference panics otherwise.
+func Is(any *types.Any, v interface{}) bool {
+	// call to check that v is a pointer
+	tryDereference(v)
+	url, err := TypeURL(v)
+	if err != nil {
+		return false
+	}
+	return any.TypeUrl == url
+}
+
+// MarshalAny marshals the value v into an any with the correct TypeUrl
+func MarshalAny(v interface{}) (*types.Any, error) {
+	var marshal func(v interface{}) ([]byte, error)
+	switch t := v.(type) {
+	case *types.Any:
+		// avoid reserializing the type if we have an any.
+		return t, nil
+	case proto.Message:
+		// the closure ignores its parameter and marshals the captured
+		// concrete message t
+		marshal = func(v interface{}) ([]byte, error) {
+			return proto.Marshal(t)
+		}
+	default:
+		// non-proto values are encoded as JSON
+		marshal = json.Marshal
+	}
+
+	url, err := TypeURL(v)
+	if err != nil {
+		return nil, err
+	}
+
+	data, err := marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	return &types.Any{
+		TypeUrl: url,
+		Value:   data,
+	}, nil
+}
+
+// UnmarshalAny unmarshals the any type into a concrete type
+func UnmarshalAny(any *types.Any) (interface{}, error) {
+	t, err := getTypeByUrl(any.TypeUrl)
+	if err != nil {
+		return nil, err
+	}
+	// allocate a fresh value of the resolved type; v is a pointer to it
+	v := reflect.New(t.t).Interface()
+	if t.isProto {
+		err = proto.Unmarshal(any.Value, v.(proto.Message))
+	} else {
+		err = json.Unmarshal(any.Value, v)
+	}
+	return v, err
+}
+
+// urlType pairs a resolved Go type with how its Any value is encoded.
+type urlType struct {
+	t       reflect.Type
+	// isProto selects proto.Unmarshal over json.Unmarshal when decoding.
+	isProto bool
+}
+
+// getTypeByUrl resolves a type url to a registered (or proto-known) type.
+func getTypeByUrl(url string) (urlType, error) {
+	// registry may be written concurrently by Register; hold mu while
+	// iterating to avoid a data race on the map
+	mu.Lock()
+	for t, u := range registry {
+		if u == url {
+			mu.Unlock()
+			return urlType{
+				t: t,
+			}, nil
+		}
+	}
+	mu.Unlock()
+	// fallback to proto registry
+	t := proto.MessageType(url)
+	if t != nil {
+		return urlType{
+			// get the underlying Elem because proto returns a pointer to the type
+			t:       t.Elem(),
+			isProto: true,
+		}, nil
+	}
+	return urlType{}, errors.Wrapf(ErrNotFound, "type with url %s", url)
+}
+
+// tryDereference returns the element type when v is a pointer and panics
+// otherwise; callers register/look up the dereferenced type.
+func tryDereference(v interface{}) reflect.Type {
+	t := reflect.TypeOf(v)
+	if t.Kind() == reflect.Ptr {
+		// require check of pointer but dereference to register
+		return t.Elem()
+	}
+	panic("v is not a pointer to a type")
+}
diff --git a/vendor/github.com/coreos/go-systemd/activation/listeners.go b/vendor/github.com/coreos/go-systemd/activation/listeners.go
index df27c29..fd5dfc7 100644
--- a/vendor/github.com/coreos/go-systemd/activation/listeners.go
+++ b/vendor/github.com/coreos/go-systemd/activation/listeners.go
@@ -48,8 +48,6 @@
 	}
 
 	if tlsConfig != nil && err == nil {
-		tlsConfig.NextProtos = []string{"http/1.1"}
-
 		for i, l := range listeners {
 			// Activate TLS only for TCP sockets
 			if l.Addr().Network() == "tcp" {
diff --git a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go
index b92b191..ba6d41d 100644
--- a/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go
+++ b/vendor/github.com/coreos/go-systemd/daemon/sdnotify.go
@@ -1,31 +1,63 @@
+// Copyright 2014 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
 // Code forked from Docker project
 package daemon
 
 import (
-	"errors"
 	"net"
 	"os"
 )
 
-var SdNotifyNoSocket = errors.New("No socket")
-
 // SdNotify sends a message to the init daemon. It is common to ignore the error.
-func SdNotify(state string) error {
+// If `unsetEnvironment` is true, the environment variable `NOTIFY_SOCKET`
+// will be unconditionally unset.
+//
+// It returns one of the following:
+// (false, nil) - notification not supported (i.e. NOTIFY_SOCKET is unset)
+// (false, err) - notification supported, but failure happened (e.g. error connecting to NOTIFY_SOCKET or while sending data)
+// (true, nil) - notification supported, data has been sent
+func SdNotify(unsetEnvironment bool, state string) (sent bool, err error) {
 	socketAddr := &net.UnixAddr{
 		Name: os.Getenv("NOTIFY_SOCKET"),
 		Net:  "unixgram",
 	}
 
+	// NOTIFY_SOCKET not set
 	if socketAddr.Name == "" {
-		return SdNotifyNoSocket
+		return false, nil
+	}
+
+	if unsetEnvironment {
+		err = os.Unsetenv("NOTIFY_SOCKET")
+	}
+	if err != nil {
+		return false, err
 	}
 
 	conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr)
+	// Error connecting to NOTIFY_SOCKET
 	if err != nil {
-		return err
+		return false, err
 	}
 	defer conn.Close()
 
 	_, err = conn.Write([]byte(state))
-	return err
+	// Error sending the message
+	if err != nil {
+		return false, err
+	}
+	return true, nil
 }
diff --git a/vendor/github.com/coreos/go-systemd/daemon/watchdog.go b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go
new file mode 100644
index 0000000..35a92e6
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/daemon/watchdog.go
@@ -0,0 +1,72 @@
+// Copyright 2016 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package daemon
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"time"
+)
+
+// SdWatchdogEnabled returns watchdog information for a service.
+// Process should call daemon.SdNotify(false, "WATCHDOG=1") every time / 2.
+// If `unsetEnvironment` is true, the environment variables `WATCHDOG_USEC`
+// and `WATCHDOG_PID` will be unconditionally unset.
+//
+// It returns one of the following:
+// (0, nil) - watchdog isn't enabled or we aren't the watched PID.
+// (0, err) - an error happened (e.g. error converting time).
+// (time, nil) - watchdog is enabled and we can send ping.
+//   time is delay before inactive service will be killed.
+func SdWatchdogEnabled(unsetEnvironment bool) (time.Duration, error) {
+	wusec := os.Getenv("WATCHDOG_USEC")
+	wpid := os.Getenv("WATCHDOG_PID")
+	if unsetEnvironment {
+		wusecErr := os.Unsetenv("WATCHDOG_USEC")
+		wpidErr := os.Unsetenv("WATCHDOG_PID")
+		if wusecErr != nil {
+			return 0, wusecErr
+		}
+		if wpidErr != nil {
+			return 0, wpidErr
+		}
+	}
+
+	if wusec == "" {
+		return 0, nil
+	}
+	s, err := strconv.Atoi(wusec)
+	if err != nil {
+		return 0, fmt.Errorf("error converting WATCHDOG_USEC: %s", err)
+	}
+	if s <= 0 {
+		return 0, fmt.Errorf("error WATCHDOG_USEC must be a positive number")
+	}
+	interval := time.Duration(s) * time.Microsecond
+
+	if wpid == "" {
+		return interval, nil
+	}
+	p, err := strconv.Atoi(wpid)
+	if err != nil {
+		return 0, fmt.Errorf("error converting WATCHDOG_PID: %s", err)
+	}
+	if os.Getpid() != p {
+		return 0, nil
+	}
+
+	return interval, nil
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/dbus.go b/vendor/github.com/coreos/go-systemd/dbus/dbus.go
new file mode 100644
index 0000000..c1694fb
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/dbus.go
@@ -0,0 +1,213 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Integration with the systemd D-Bus API.  See http://www.freedesktop.org/wiki/Software/systemd/dbus/
+package dbus
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/godbus/dbus"
+)
+
+const (
+	alpha        = `abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ`
+	num          = `0123456789`
+	alphanum     = alpha + num
+	signalBuffer = 100
+)
+
+// needsEscape checks whether a byte in a potential dbus ObjectPath needs to be escaped
+func needsEscape(i int, b byte) bool {
+	// Escape everything that is not a-z-A-Z-0-9
+	// Also escape 0-9 if it's the first character
+	return strings.IndexByte(alphanum, b) == -1 ||
+		(i == 0 && strings.IndexByte(num, b) != -1)
+}
+
+// PathBusEscape sanitizes a constituent string of a dbus ObjectPath using the
+// rules that systemd uses for serializing special characters.
+func PathBusEscape(path string) string {
+	// Special case the empty string
+	if len(path) == 0 {
+		return "_"
+	}
+	n := []byte{}
+	for i := 0; i < len(path); i++ {
+		c := path[i]
+		if needsEscape(i, c) {
+			e := fmt.Sprintf("_%x", c)
+			n = append(n, []byte(e)...)
+		} else {
+			n = append(n, c)
+		}
+	}
+	return string(n)
+}
+
+// Conn is a connection to systemd's dbus endpoint.
+type Conn struct {
+	// sysconn/sysobj are only used to call dbus methods
+	sysconn *dbus.Conn
+	sysobj  dbus.BusObject
+
+	// sigconn/sigobj are only used to receive dbus signals
+	sigconn *dbus.Conn
+	sigobj  dbus.BusObject
+
+	jobListener struct {
+		jobs map[dbus.ObjectPath]chan<- string
+		sync.Mutex
+	}
+	subscriber struct {
+		updateCh chan<- *SubStateUpdate
+		errCh    chan<- error
+		sync.Mutex
+		ignore      map[dbus.ObjectPath]int64
+		cleanIgnore int64
+	}
+}
+
+// New establishes a connection to any available bus and authenticates.
+// Callers should call Close() when done with the connection.
+func New() (*Conn, error) {
+	conn, err := NewSystemConnection()
+	if err != nil && os.Geteuid() == 0 {
+		return NewSystemdConnection()
+	}
+	return conn, err
+}
+
+// NewSystemConnection establishes a connection to the system bus and authenticates.
+// Callers should call Close() when done with the connection
+func NewSystemConnection() (*Conn, error) {
+	return NewConnection(func() (*dbus.Conn, error) {
+		return dbusAuthHelloConnection(dbus.SystemBusPrivate)
+	})
+}
+
+// NewUserConnection establishes a connection to the session bus and
+// authenticates. This can be used to connect to systemd user instances.
+// Callers should call Close() when done with the connection.
+func NewUserConnection() (*Conn, error) {
+	return NewConnection(func() (*dbus.Conn, error) {
+		return dbusAuthHelloConnection(dbus.SessionBusPrivate)
+	})
+}
+
+// NewSystemdConnection establishes a private, direct connection to systemd.
+// This can be used for communicating with systemd without a dbus daemon.
+// Callers should call Close() when done with the connection.
+func NewSystemdConnection() (*Conn, error) {
+	return NewConnection(func() (*dbus.Conn, error) {
+		// We skip Hello when talking directly to systemd.
+		return dbusAuthConnection(func() (*dbus.Conn, error) {
+			return dbus.Dial("unix:path=/run/systemd/private")
+		})
+	})
+}
+
+// Close closes an established connection
+func (c *Conn) Close() {
+	c.sysconn.Close()
+	c.sigconn.Close()
+}
+
+// NewConnection establishes a connection to a bus using a caller-supplied function.
+// This allows connecting to remote buses through a user-supplied mechanism.
+// The supplied function may be called multiple times, and should return independent connections.
+// The returned connection must be fully initialised: the org.freedesktop.DBus.Hello call must have succeeded,
+// and any authentication should be handled by the function.
+func NewConnection(dialBus func() (*dbus.Conn, error)) (*Conn, error) {
+	sysconn, err := dialBus()
+	if err != nil {
+		return nil, err
+	}
+
+	sigconn, err := dialBus()
+	if err != nil {
+		sysconn.Close()
+		return nil, err
+	}
+
+	c := &Conn{
+		sysconn: sysconn,
+		sysobj:  systemdObject(sysconn),
+		sigconn: sigconn,
+		sigobj:  systemdObject(sigconn),
+	}
+
+	c.subscriber.ignore = make(map[dbus.ObjectPath]int64)
+	c.jobListener.jobs = make(map[dbus.ObjectPath]chan<- string)
+
+	// Setup the listeners on jobs so that we can get completions
+	c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+		"type='signal', interface='org.freedesktop.systemd1.Manager', member='JobRemoved'")
+
+	c.dispatch()
+	return c, nil
+}
+
+// GetManagerProperty returns the value of a property on the org.freedesktop.systemd1.Manager
+// interface. The value is returned in its string representation, as defined at
+// https://developer.gnome.org/glib/unstable/gvariant-text.html
+func (c *Conn) GetManagerProperty(prop string) (string, error) {
+	variant, err := c.sysobj.GetProperty("org.freedesktop.systemd1.Manager." + prop)
+	if err != nil {
+		return "", err
+	}
+	return variant.String(), nil
+}
+
+func dbusAuthConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
+	conn, err := createBus()
+	if err != nil {
+		return nil, err
+	}
+
+	// Only use EXTERNAL method, and hardcode the uid (not username)
+	// to avoid a username lookup (which requires a dynamically linked
+	// libc)
+	methods := []dbus.Auth{dbus.AuthExternal(strconv.Itoa(os.Getuid()))}
+
+	err = conn.Auth(methods)
+	if err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+func dbusAuthHelloConnection(createBus func() (*dbus.Conn, error)) (*dbus.Conn, error) {
+	conn, err := dbusAuthConnection(createBus)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = conn.Hello(); err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+func systemdObject(conn *dbus.Conn) dbus.BusObject {
+	return conn.Object("org.freedesktop.systemd1", dbus.ObjectPath("/org/freedesktop/systemd1"))
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/methods.go b/vendor/github.com/coreos/go-systemd/dbus/methods.go
new file mode 100644
index 0000000..ab17f7c
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/methods.go
@@ -0,0 +1,565 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+	"errors"
+	"path"
+	"strconv"
+
+	"github.com/godbus/dbus"
+)
+
+func (c *Conn) jobComplete(signal *dbus.Signal) {
+	var id uint32
+	var job dbus.ObjectPath
+	var unit string
+	var result string
+	dbus.Store(signal.Body, &id, &job, &unit, &result)
+	c.jobListener.Lock()
+	out, ok := c.jobListener.jobs[job]
+	if ok {
+		out <- result
+		delete(c.jobListener.jobs, job)
+	}
+	c.jobListener.Unlock()
+}
+
+func (c *Conn) startJob(ch chan<- string, job string, args ...interface{}) (int, error) {
+	if ch != nil {
+		c.jobListener.Lock()
+		defer c.jobListener.Unlock()
+	}
+
+	var p dbus.ObjectPath
+	err := c.sysobj.Call(job, 0, args...).Store(&p)
+	if err != nil {
+		return 0, err
+	}
+
+	if ch != nil {
+		c.jobListener.jobs[p] = ch
+	}
+
+	// ignore error since 0 is fine if conversion fails
+	jobID, _ := strconv.Atoi(path.Base(string(p)))
+
+	return jobID, nil
+}
+
+// StartUnit enqueues a start job and depending jobs, if any (unless otherwise
+// specified by the mode string).
+//
+// Takes the unit to activate, plus a mode string. The mode needs to be one of
+// replace, fail, isolate, ignore-dependencies, ignore-requirements. If
+// "replace" the call will start the unit and its dependencies, possibly
+// replacing already queued jobs that conflict with this. If "fail" the call
+// will start the unit and its dependencies, but will fail if this would change
+// an already queued job. If "isolate" the call will start the unit in question
+// and terminate all units that aren't dependencies of it. If
+// "ignore-dependencies" it will start a unit but ignore all its dependencies.
+// If "ignore-requirements" it will start a unit but only ignore the
+// requirement dependencies. It is not recommended to make use of the latter
+// two options.
+//
+// If the provided channel is non-nil, a result string will be sent to it upon
+// job completion: one of done, canceled, timeout, failed, dependency, skipped.
+// done indicates successful execution of a job. canceled indicates that a job
+// has been canceled  before it finished execution. timeout indicates that the
+// job timeout was reached. failed indicates that the job failed. dependency
+// indicates that a job this job has been depending on failed and the job hence
+// has been removed too. skipped indicates that a job was skipped because it
+// didn't apply to the units current state.
+//
+// If no error occurs, the ID of the underlying systemd job will be returned. There
+// does exist the possibility for no error to be returned, but for the returned job
+// ID to be 0. In this case, the actual underlying ID is not 0 and this datapoint
+// should not be considered authoritative.
+//
+// If an error does occur, it will be returned to the user alongside a job ID of 0.
+func (c *Conn) StartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartUnit", name, mode)
+}
+
+// StopUnit is similar to StartUnit but stops the specified unit rather
+// than starting it.
+func (c *Conn) StopUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StopUnit", name, mode)
+}
+
+// ReloadUnit reloads a unit.  Reloading is done only if the unit is already running and fails otherwise.
+func (c *Conn) ReloadUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadUnit", name, mode)
+}
+
+// RestartUnit restarts a service.  If a service is restarted that isn't
+// running it will be started.
+func (c *Conn) RestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.RestartUnit", name, mode)
+}
+
+// TryRestartUnit is like RestartUnit, except that a service that isn't running
+// is not affected by the restart.
+func (c *Conn) TryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.TryRestartUnit", name, mode)
+}
+
+// ReloadOrRestartUnit attempts a reload if the unit supports it and uses a
+// restart otherwise.
+func (c *Conn) ReloadOrRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrRestartUnit", name, mode)
+}
+
+// ReloadOrTryRestartUnit attempts a reload if the unit supports it and uses a
+// "Try" flavored restart otherwise.
+func (c *Conn) ReloadOrTryRestartUnit(name string, mode string, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.ReloadOrTryRestartUnit", name, mode)
+}
+
+// StartTransientUnit may be used to create and start a transient unit, which
+// will be released as soon as it is not running or referenced anymore or the
+// system is rebooted. name is the unit name including suffix, and must be
+// unique. mode is the same as in StartUnit(), properties contains properties
+// of the unit.
+func (c *Conn) StartTransientUnit(name string, mode string, properties []Property, ch chan<- string) (int, error) {
+	return c.startJob(ch, "org.freedesktop.systemd1.Manager.StartTransientUnit", name, mode, properties, make([]PropertyCollection, 0))
+}
+
+// KillUnit takes the unit name and a UNIX signal number to send.  All of the unit's
+// processes are killed.
+func (c *Conn) KillUnit(name string, signal int32) {
+	c.sysobj.Call("org.freedesktop.systemd1.Manager.KillUnit", 0, name, "all", signal).Store()
+}
+
+// ResetFailedUnit resets the "failed" state of a specific unit.
+func (c *Conn) ResetFailedUnit(name string) error {
+	return c.sysobj.Call("org.freedesktop.systemd1.Manager.ResetFailedUnit", 0, name).Store()
+}
+
+// getProperties takes the unit name and returns all of its dbus object properties, for the given dbus interface
+func (c *Conn) getProperties(unit string, dbusInterface string) (map[string]interface{}, error) {
+	var err error
+	var props map[string]dbus.Variant
+
+	path := unitPath(unit)
+	if !path.IsValid() {
+		return nil, errors.New("invalid unit name: " + unit)
+	}
+
+	obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+	err = obj.Call("org.freedesktop.DBus.Properties.GetAll", 0, dbusInterface).Store(&props)
+	if err != nil {
+		return nil, err
+	}
+
+	out := make(map[string]interface{}, len(props))
+	for k, v := range props {
+		out[k] = v.Value()
+	}
+
+	return out, nil
+}
+
+// GetUnitProperties takes the unit name and returns all of its dbus object properties.
+func (c *Conn) GetUnitProperties(unit string) (map[string]interface{}, error) {
+	return c.getProperties(unit, "org.freedesktop.systemd1.Unit")
+}
+
+func (c *Conn) getProperty(unit string, dbusInterface string, propertyName string) (*Property, error) {
+	var err error
+	var prop dbus.Variant
+
+	path := unitPath(unit)
+	if !path.IsValid() {
+		return nil, errors.New("invalid unit name: " + unit)
+	}
+
+	obj := c.sysconn.Object("org.freedesktop.systemd1", path)
+	err = obj.Call("org.freedesktop.DBus.Properties.Get", 0, dbusInterface, propertyName).Store(&prop)
+	if err != nil {
+		return nil, err
+	}
+
+	return &Property{Name: propertyName, Value: prop}, nil
+}
+
+func (c *Conn) GetUnitProperty(unit string, propertyName string) (*Property, error) {
+	return c.getProperty(unit, "org.freedesktop.systemd1.Unit", propertyName)
+}
+
+// GetServiceProperty returns property for given service name and property name
+func (c *Conn) GetServiceProperty(service string, propertyName string) (*Property, error) {
+	return c.getProperty(service, "org.freedesktop.systemd1.Service", propertyName)
+}
+
+// GetUnitTypeProperties returns the extra properties for a unit, specific to the unit type.
+// Valid values for unitType: Service, Socket, Target, Device, Mount, Automount, Snapshot, Timer, Swap, Path, Slice, Scope
+// return "dbus.Error: Unknown interface" if the unitType is not the correct type of the unit
+func (c *Conn) GetUnitTypeProperties(unit string, unitType string) (map[string]interface{}, error) {
+	return c.getProperties(unit, "org.freedesktop.systemd1."+unitType)
+}
+
+// SetUnitProperties may be used to modify certain unit properties at runtime.
+// Not all properties may be changed at runtime, but many resource management
+// settings (primarily those in systemd.cgroup(5)) may. The changes are applied
+// instantly, and stored on disk for future boots, unless runtime is true, in which
+// case the settings only apply until the next reboot. name is the name of the unit
+// to modify. properties are the settings to set, encoded as an array of property
+// name and value pairs.
+func (c *Conn) SetUnitProperties(name string, runtime bool, properties ...Property) error {
+	return c.sysobj.Call("org.freedesktop.systemd1.Manager.SetUnitProperties", 0, name, runtime, properties).Store()
+}
+
+func (c *Conn) GetUnitTypeProperty(unit string, unitType string, propertyName string) (*Property, error) {
+	return c.getProperty(unit, "org.freedesktop.systemd1."+unitType, propertyName)
+}
+
+type UnitStatus struct {
+	Name        string          // The primary unit name as string
+	Description string          // The human readable description string
+	LoadState   string          // The load state (i.e. whether the unit file has been loaded successfully)
+	ActiveState string          // The active state (i.e. whether the unit is currently started or not)
+	SubState    string          // The sub state (a more fine-grained version of the active state that is specific to the unit type, which the active state is not)
+	Followed    string          // A unit that is being followed in its state by this unit, if there is any, otherwise the empty string.
+	Path        dbus.ObjectPath // The unit object path
+	JobId       uint32          // If there is a job queued for the job unit the numeric job id, 0 otherwise
+	JobType     string          // The job type as string
+	JobPath     dbus.ObjectPath // The job object path
+}
+
+type storeFunc func(retvalues ...interface{}) error
+
+func (c *Conn) listUnitsInternal(f storeFunc) ([]UnitStatus, error) {
+	result := make([][]interface{}, 0)
+	err := f(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	status := make([]UnitStatus, len(result))
+	statusInterface := make([]interface{}, len(status))
+	for i := range status {
+		statusInterface[i] = &status[i]
+	}
+
+	err = dbus.Store(resultInterface, statusInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return status, nil
+}
+
+// ListUnits returns an array with all currently loaded units. Note that
+// units may be known by multiple names at the same time, and hence there might
+// be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnits() ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnits", 0).Store)
+}
+
+// ListUnitsFiltered returns an array with units filtered by state.
+// It takes a list of units' statuses to filter.
+func (c *Conn) ListUnitsFiltered(states []string) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsFiltered", 0, states).Store)
+}
+
+// ListUnitsByPatterns returns an array with units.
+// It takes a list of units' statuses and names to filter.
+// Note that units may be known by multiple names at the same time,
+// and hence there might be more unit names loaded than actual units behind them.
+func (c *Conn) ListUnitsByPatterns(states []string, patterns []string) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByPatterns", 0, states, patterns).Store)
+}
+
+// ListUnitsByNames returns an array with units. It takes a list of units'
+// names and returns an UnitStatus array. Comparing to ListUnitsByPatterns
+// method, this method returns statuses even for inactive or non-existing
+// units. Input array should contain exact unit names, but not patterns.
+func (c *Conn) ListUnitsByNames(units []string) ([]UnitStatus, error) {
+	return c.listUnitsInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitsByNames", 0, units).Store)
+}
+
+type UnitFile struct {
+	Path string
+	Type string
+}
+
+func (c *Conn) listUnitFilesInternal(f storeFunc) ([]UnitFile, error) {
+	result := make([][]interface{}, 0)
+	err := f(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	files := make([]UnitFile, len(result))
+	fileInterface := make([]interface{}, len(files))
+	for i := range files {
+		fileInterface[i] = &files[i]
+	}
+
+	err = dbus.Store(resultInterface, fileInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return files, nil
+}
+
+// ListUnitFiles returns an array of all available units on disk.
+func (c *Conn) ListUnitFiles() ([]UnitFile, error) {
+	return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFiles", 0).Store)
+}
+
+// ListUnitFilesByPatterns returns an array of all available units on disk matched the patterns.
+func (c *Conn) ListUnitFilesByPatterns(states []string, patterns []string) ([]UnitFile, error) {
+	return c.listUnitFilesInternal(c.sysobj.Call("org.freedesktop.systemd1.Manager.ListUnitFilesByPatterns", 0, states, patterns).Store)
+}
+
+type LinkUnitFileChange EnableUnitFileChange
+
+// LinkUnitFiles links unit files (that are located outside of the
+// usual unit search paths) into the unit search path.
+//
+// It takes a list of absolute paths to unit files to link and two
+// booleans. The first boolean controls whether the unit shall be
+// enabled for runtime only (true, /run), or persistently (false,
+// /etc).
+// The second controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns a list of the changes made. The list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) LinkUnitFiles(files []string, runtime bool, force bool) ([]LinkUnitFileChange, error) {
+	result := make([][]interface{}, 0)
+	err := c.sysobj.Call("org.freedesktop.systemd1.Manager.LinkUnitFiles", 0, files, runtime, force).Store(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]LinkUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+// EnableUnitFiles may be used to enable one or more units in the system (by
+// creating symlinks to them in /etc or /run).
+//
+// It takes a list of unit files to enable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and two booleans: the first controls whether the unit shall
+// be enabled for runtime only (true, /run), or persistently (false, /etc).
+// The second one controls whether symlinks pointing to other units shall
+// be replaced if necessary.
+//
+// This call returns one boolean and an array with the changes made. The
+// boolean signals whether the unit files contained any enablement
+// information (i.e. an [Install]) section. The changes list consists of
+// structures with three strings: the type of the change (one of symlink
+// or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) EnableUnitFiles(files []string, runtime bool, force bool) (bool, []EnableUnitFileChange, error) {
+	var carries_install_info bool
+
+	result := make([][]interface{}, 0)
+	err := c.sysobj.Call("org.freedesktop.systemd1.Manager.EnableUnitFiles", 0, files, runtime, force).Store(&carries_install_info, &result)
+	if err != nil {
+		return false, nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]EnableUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return false, nil, err
+	}
+
+	return carries_install_info, changes, nil
+}
+
+type EnableUnitFileChange struct {
+	Type        string // Type of the change (one of symlink or unlink)
+	Filename    string // File name of the symlink
+	Destination string // Destination of the symlink
+}
+
+// DisableUnitFiles may be used to disable one or more units in the system (by
+// removing symlinks to them from /etc or /run).
+//
+// It takes a list of unit files to disable (either just file names or full
+// absolute paths if the unit files are residing outside the usual unit
+// search paths), and one boolean: whether the unit was enabled for runtime
+// only (true, /run), or persistently (false, /etc).
+//
+// This call returns an array with the changes made. The changes list
+// consists of structures with three strings: the type of the change (one of
+// symlink or unlink), the file name of the symlink and the destination of the
+// symlink.
+func (c *Conn) DisableUnitFiles(files []string, runtime bool) ([]DisableUnitFileChange, error) {
+	result := make([][]interface{}, 0)
+	err := c.sysobj.Call("org.freedesktop.systemd1.Manager.DisableUnitFiles", 0, files, runtime).Store(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]DisableUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+type DisableUnitFileChange struct {
+	Type        string // Type of the change (one of symlink or unlink)
+	Filename    string // File name of the symlink
+	Destination string // Destination of the symlink
+}
+
+// MaskUnitFiles masks one or more units in the system
+//
+// It takes three arguments:
+//   * list of units to mask (either just file names or full
+//     absolute paths if the unit files are residing outside
+//     the usual unit search paths)
+//   * runtime to specify whether the unit was enabled for runtime
+//     only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
+//   * force flag
+func (c *Conn) MaskUnitFiles(files []string, runtime bool, force bool) ([]MaskUnitFileChange, error) {
+	result := make([][]interface{}, 0)
+	err := c.sysobj.Call("org.freedesktop.systemd1.Manager.MaskUnitFiles", 0, files, runtime, force).Store(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]MaskUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+type MaskUnitFileChange struct {
+	Type        string // Type of the change (one of symlink or unlink)
+	Filename    string // File name of the symlink
+	Destination string // Destination of the symlink
+}
+
+// UnmaskUnitFiles unmasks one or more units in the system
+//
+// It takes two arguments:
+//   * list of unit files to mask (either just file names or full
+//     absolute paths if the unit files are residing outside
+//     the usual unit search paths)
+//   * runtime to specify whether the unit was enabled for runtime
+//     only (true, /run/systemd/..), or persistently (false, /etc/systemd/..)
+func (c *Conn) UnmaskUnitFiles(files []string, runtime bool) ([]UnmaskUnitFileChange, error) {
+	result := make([][]interface{}, 0)
+	err := c.sysobj.Call("org.freedesktop.systemd1.Manager.UnmaskUnitFiles", 0, files, runtime).Store(&result)
+	if err != nil {
+		return nil, err
+	}
+
+	resultInterface := make([]interface{}, len(result))
+	for i := range result {
+		resultInterface[i] = result[i]
+	}
+
+	changes := make([]UnmaskUnitFileChange, len(result))
+	changesInterface := make([]interface{}, len(changes))
+	for i := range changes {
+		changesInterface[i] = &changes[i]
+	}
+
+	err = dbus.Store(resultInterface, changesInterface...)
+	if err != nil {
+		return nil, err
+	}
+
+	return changes, nil
+}
+
+type UnmaskUnitFileChange struct {
+	Type        string // Type of the change (one of symlink or unlink)
+	Filename    string // File name of the symlink
+	Destination string // Destination of the symlink
+}
+
+// Reload instructs systemd to scan for and reload unit files. This is
+// equivalent to a 'systemctl daemon-reload'.
+func (c *Conn) Reload() error {
+	return c.sysobj.Call("org.freedesktop.systemd1.Manager.Reload", 0).Store()
+}
+
+func unitPath(name string) dbus.ObjectPath {
+	return dbus.ObjectPath("/org/freedesktop/systemd1/unit/" + PathBusEscape(name))
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/properties.go b/vendor/github.com/coreos/go-systemd/dbus/properties.go
new file mode 100644
index 0000000..6c81895
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/properties.go
@@ -0,0 +1,237 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+	"github.com/godbus/dbus"
+)
+
+// From the systemd docs:
+//
+// The properties array of StartTransientUnit() may take many of the settings
+// that may also be configured in unit files. Not all parameters are currently
+// accepted though, but we plan to cover more properties with future release.
+// Currently you may set the Description, Slice and all dependency types of
+// units, as well as RemainAfterExit, ExecStart for service units,
+// TimeoutStopUSec and PIDs for scope units, and CPUAccounting, CPUShares,
+// BlockIOAccounting, BlockIOWeight, BlockIOReadBandwidth,
+// BlockIOWriteBandwidth, BlockIODeviceWeight, MemoryAccounting, MemoryLimit,
+// DevicePolicy, DeviceAllow for services/scopes/slices. These fields map
+// directly to their counterparts in unit files and as normal D-Bus object
+// properties. The exception here is the PIDs field of scope units which is
+// used for construction of the scope only and specifies the initial PIDs to
+// add to the scope object.
+
+type Property struct {
+	Name  string
+	Value dbus.Variant
+}
+
+type PropertyCollection struct {
+	Name       string
+	Properties []Property
+}
+
+type execStart struct {
+	Path             string   // the binary path to execute
+	Args             []string // an array with all arguments to pass to the executed command, starting with argument 0
+	UncleanIsFailure bool     // a boolean whether it should be considered a failure if the process exits uncleanly
+}
+
+// PropExecStart sets the ExecStart service property.  The first argument is a
+// slice with the binary path to execute followed by the arguments to pass to
+// the executed command. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
+func PropExecStart(command []string, uncleanIsFailure bool) Property {
+	execStarts := []execStart{
+		execStart{
+			Path:             command[0],
+			Args:             command,
+			UncleanIsFailure: uncleanIsFailure,
+		},
+	}
+
+	return Property{
+		Name:  "ExecStart",
+		Value: dbus.MakeVariant(execStarts),
+	}
+}
+
+// PropRemainAfterExit sets the RemainAfterExit service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#RemainAfterExit=
+func PropRemainAfterExit(b bool) Property {
+	return Property{
+		Name:  "RemainAfterExit",
+		Value: dbus.MakeVariant(b),
+	}
+}
+
+// PropType sets the Type service property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.service.html#Type=
+func PropType(t string) Property {
+	return Property{
+		Name:  "Type",
+		Value: dbus.MakeVariant(t),
+	}
+}
+
+// PropDescription sets the Description unit property. See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Description=
+func PropDescription(desc string) Property {
+	return Property{
+		Name:  "Description",
+		Value: dbus.MakeVariant(desc),
+	}
+}
+
+func propDependency(name string, units []string) Property {
+	return Property{
+		Name:  name,
+		Value: dbus.MakeVariant(units),
+	}
+}
+
+// PropRequires sets the Requires unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requires=
+func PropRequires(units ...string) Property {
+	return propDependency("Requires", units)
+}
+
+// PropRequiresOverridable sets the RequiresOverridable unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresOverridable=
+func PropRequiresOverridable(units ...string) Property {
+	return propDependency("RequiresOverridable", units)
+}
+
+// PropRequisite sets the Requisite unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Requisite=
+func PropRequisite(units ...string) Property {
+	return propDependency("Requisite", units)
+}
+
+// PropRequisiteOverridable sets the RequisiteOverridable unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequisiteOverridable=
+func PropRequisiteOverridable(units ...string) Property {
+	return propDependency("RequisiteOverridable", units)
+}
+
+// PropWants sets the Wants unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Wants=
+func PropWants(units ...string) Property {
+	return propDependency("Wants", units)
+}
+
+// PropBindsTo sets the BindsTo unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BindsTo=
+func PropBindsTo(units ...string) Property {
+	return propDependency("BindsTo", units)
+}
+
+// PropRequiredBy sets the RequiredBy unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredBy=
+func PropRequiredBy(units ...string) Property {
+	return propDependency("RequiredBy", units)
+}
+
+// PropRequiredByOverridable sets the RequiredByOverridable unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiredByOverridable=
+func PropRequiredByOverridable(units ...string) Property {
+	return propDependency("RequiredByOverridable", units)
+}
+
+// PropWantedBy sets the WantedBy unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#WantedBy=
+func PropWantedBy(units ...string) Property {
+	return propDependency("WantedBy", units)
+}
+
+// PropBoundBy sets the BoundBy unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#BoundBy=
+func PropBoundBy(units ...string) Property {
+	return propDependency("BoundBy", units)
+}
+
+// PropConflicts sets the Conflicts unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Conflicts=
+func PropConflicts(units ...string) Property {
+	return propDependency("Conflicts", units)
+}
+
+// PropConflictedBy sets the ConflictedBy unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#ConflictedBy=
+func PropConflictedBy(units ...string) Property {
+	return propDependency("ConflictedBy", units)
+}
+
+// PropBefore sets the Before unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Before=
+func PropBefore(units ...string) Property {
+	return propDependency("Before", units)
+}
+
+// PropAfter sets the After unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#After=
+func PropAfter(units ...string) Property {
+	return propDependency("After", units)
+}
+
+// PropOnFailure sets the OnFailure unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#OnFailure=
+func PropOnFailure(units ...string) Property {
+	return propDependency("OnFailure", units)
+}
+
+// PropTriggers sets the Triggers unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#Triggers=
+func PropTriggers(units ...string) Property {
+	return propDependency("Triggers", units)
+}
+
+// PropTriggeredBy sets the TriggeredBy unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#TriggeredBy=
+func PropTriggeredBy(units ...string) Property {
+	return propDependency("TriggeredBy", units)
+}
+
+// PropPropagatesReloadTo sets the PropagatesReloadTo unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#PropagatesReloadTo=
+func PropPropagatesReloadTo(units ...string) Property {
+	return propDependency("PropagatesReloadTo", units)
+}
+
+// PropRequiresMountsFor sets the RequiresMountsFor unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.unit.html#RequiresMountsFor=
+func PropRequiresMountsFor(units ...string) Property {
+	return propDependency("RequiresMountsFor", units)
+}
+
+// PropSlice sets the Slice unit property.  See
+// http://www.freedesktop.org/software/systemd/man/systemd.resource-control.html#Slice=
+func PropSlice(slice string) Property {
+	return Property{
+		Name:  "Slice",
+		Value: dbus.MakeVariant(slice),
+	}
+}
+
+// PropPids sets the PIDs field of scope units used in the initial construction
+// of the scope only and specifies the initial PIDs to add to the scope object.
+// See https://www.freedesktop.org/wiki/Software/systemd/ControlGroupInterface/#properties
+func PropPids(pids ...uint32) Property {
+	return Property{
+		Name:  "PIDs",
+		Value: dbus.MakeVariant(pids),
+	}
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/set.go b/vendor/github.com/coreos/go-systemd/dbus/set.go
new file mode 100644
index 0000000..f92e6fb
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/set.go
@@ -0,0 +1,47 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+type set struct {
+	data map[string]bool
+}
+
+func (s *set) Add(value string) {
+	s.data[value] = true
+}
+
+func (s *set) Remove(value string) {
+	delete(s.data, value)
+}
+
+func (s *set) Contains(value string) (exists bool) {
+	_, exists = s.data[value]
+	return
+}
+
+func (s *set) Length() int {
+	return len(s.data)
+}
+
+func (s *set) Values() (values []string) {
+	for val, _ := range s.data {
+		values = append(values, val)
+	}
+	return
+}
+
+func newSet() *set {
+	return &set{make(map[string]bool)}
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription.go b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
new file mode 100644
index 0000000..9964514
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/subscription.go
@@ -0,0 +1,250 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+	"errors"
+	"time"
+
+	"github.com/godbus/dbus"
+)
+
+const (
+	cleanIgnoreInterval = int64(10 * time.Second)
+	ignoreInterval      = int64(30 * time.Millisecond)
+)
+
+// Subscribe sets up this connection to subscribe to all systemd dbus events.
+// This is required before calling SubscribeUnits. When the connection closes
+// systemd will automatically stop sending signals so there is no need to
+// explicitly call Unsubscribe().
+func (c *Conn) Subscribe() error {
+	c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+		"type='signal',interface='org.freedesktop.systemd1.Manager',member='UnitNew'")
+	c.sigconn.BusObject().Call("org.freedesktop.DBus.AddMatch", 0,
+		"type='signal',interface='org.freedesktop.DBus.Properties',member='PropertiesChanged'")
+
+	err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Subscribe", 0).Store()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Unsubscribe this connection from systemd dbus events.
+func (c *Conn) Unsubscribe() error {
+	err := c.sigobj.Call("org.freedesktop.systemd1.Manager.Unsubscribe", 0).Store()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (c *Conn) dispatch() {
+	ch := make(chan *dbus.Signal, signalBuffer)
+
+	c.sigconn.Signal(ch)
+
+	go func() {
+		for {
+			signal, ok := <-ch
+			if !ok {
+				return
+			}
+
+			if signal.Name == "org.freedesktop.systemd1.Manager.JobRemoved" {
+				c.jobComplete(signal)
+			}
+
+			if c.subscriber.updateCh == nil {
+				continue
+			}
+
+			var unitPath dbus.ObjectPath
+			switch signal.Name {
+			case "org.freedesktop.systemd1.Manager.JobRemoved":
+				unitName := signal.Body[2].(string)
+				c.sysobj.Call("org.freedesktop.systemd1.Manager.GetUnit", 0, unitName).Store(&unitPath)
+			case "org.freedesktop.systemd1.Manager.UnitNew":
+				unitPath = signal.Body[1].(dbus.ObjectPath)
+			case "org.freedesktop.DBus.Properties.PropertiesChanged":
+				if signal.Body[0].(string) == "org.freedesktop.systemd1.Unit" {
+					unitPath = signal.Path
+				}
+			}
+
+			if unitPath == dbus.ObjectPath("") {
+				continue
+			}
+
+			c.sendSubStateUpdate(unitPath)
+		}
+	}()
+}
+
+// Returns two unbuffered channels which will receive all changed units every
+// interval.  Deleted units are sent as nil.
+func (c *Conn) SubscribeUnits(interval time.Duration) (<-chan map[string]*UnitStatus, <-chan error) {
+	return c.SubscribeUnitsCustom(interval, 0, func(u1, u2 *UnitStatus) bool { return *u1 != *u2 }, nil)
+}
+
+// SubscribeUnitsCustom is like SubscribeUnits but lets you specify the buffer
+// size of the channels, the comparison function for detecting changes and a filter
+// function for cutting down on the noise that your channel receives.
+func (c *Conn) SubscribeUnitsCustom(interval time.Duration, buffer int, isChanged func(*UnitStatus, *UnitStatus) bool, filterUnit func(string) bool) (<-chan map[string]*UnitStatus, <-chan error) {
+	old := make(map[string]*UnitStatus)
+	statusChan := make(chan map[string]*UnitStatus, buffer)
+	errChan := make(chan error, buffer)
+
+	go func() {
+		for {
+			timerChan := time.After(interval)
+
+			units, err := c.ListUnits()
+			if err == nil {
+				cur := make(map[string]*UnitStatus)
+				for i := range units {
+					if filterUnit != nil && filterUnit(units[i].Name) {
+						continue
+					}
+					cur[units[i].Name] = &units[i]
+				}
+
+				// add all new or changed units
+				changed := make(map[string]*UnitStatus)
+				for n, u := range cur {
+					if oldU, ok := old[n]; !ok || isChanged(oldU, u) {
+						changed[n] = u
+					}
+					delete(old, n)
+				}
+
+				// add all deleted units
+				for oldN := range old {
+					changed[oldN] = nil
+				}
+
+				old = cur
+
+				if len(changed) != 0 {
+					statusChan <- changed
+				}
+			} else {
+				errChan <- err
+			}
+
+			<-timerChan
+		}
+	}()
+
+	return statusChan, errChan
+}
+
+type SubStateUpdate struct {
+	UnitName string
+	SubState string
+}
+
+// SetSubStateSubscriber writes to updateCh when any unit's substate changes.
+// Although this writes to updateCh on every state change, the reported state
+// may be more recent than the change that generated it (due to an unavoidable
+// race in the systemd dbus interface).  That is, this method provides a good
+// way to keep a current view of all units' states, but is not guaranteed to
+// show every state transition they go through.  Furthermore, state changes
+// will only be written to the channel with non-blocking writes.  If updateCh
+// is full, it attempts to write an error to errCh; if errCh is full, the error
+// passes silently.
+func (c *Conn) SetSubStateSubscriber(updateCh chan<- *SubStateUpdate, errCh chan<- error) {
+	c.subscriber.Lock()
+	defer c.subscriber.Unlock()
+	c.subscriber.updateCh = updateCh
+	c.subscriber.errCh = errCh
+}
+
+func (c *Conn) sendSubStateUpdate(path dbus.ObjectPath) {
+	c.subscriber.Lock()
+	defer c.subscriber.Unlock()
+
+	if c.shouldIgnore(path) {
+		return
+	}
+
+	info, err := c.GetUnitProperties(string(path))
+	if err != nil {
+		select {
+		case c.subscriber.errCh <- err:
+		default:
+		}
+	}
+
+	name := info["Id"].(string)
+	substate := info["SubState"].(string)
+
+	update := &SubStateUpdate{name, substate}
+	select {
+	case c.subscriber.updateCh <- update:
+	default:
+		select {
+		case c.subscriber.errCh <- errors.New("update channel full!"):
+		default:
+		}
+	}
+
+	c.updateIgnore(path, info)
+}
+
+// The ignore functions work around a wart in the systemd dbus interface.
+// Requesting the properties of an unloaded unit will cause systemd to send a
+// pair of UnitNew/UnitRemoved signals.  Because we need to get a unit's
+// properties on UnitNew (as that's the only indication of a new unit coming up
+// for the first time), we would enter an infinite loop if we did not attempt
+// to detect and ignore these spurious signals.  The signals themselves are
+// indistinguishable from relevant ones, so we (somewhat hackishly) ignore an
+// unloaded unit's signals for a short time after requesting its properties.
+// This means that we will miss e.g. a transient unit being restarted
+// *immediately* upon failure and also a transient unit being started
+// immediately after requesting its status (with systemctl status, for example,
+// because this causes a UnitNew signal to be sent which then causes us to fetch
+// the properties).
+
+func (c *Conn) shouldIgnore(path dbus.ObjectPath) bool {
+	t, ok := c.subscriber.ignore[path]
+	return ok && t >= time.Now().UnixNano()
+}
+
+func (c *Conn) updateIgnore(path dbus.ObjectPath, info map[string]interface{}) {
+	c.cleanIgnore()
+
+	// unit is unloaded - it will trigger bad systemd dbus behavior
+	if info["LoadState"].(string) == "not-found" {
+		c.subscriber.ignore[path] = time.Now().UnixNano() + ignoreInterval
+	}
+}
+
+// without this, ignore would grow unboundedly over time
+func (c *Conn) cleanIgnore() {
+	now := time.Now().UnixNano()
+	if c.subscriber.cleanIgnore < now {
+		c.subscriber.cleanIgnore = now + cleanIgnoreInterval
+
+		for p, t := range c.subscriber.ignore {
+			if t < now {
+				delete(c.subscriber.ignore, p)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
new file mode 100644
index 0000000..5b408d5
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/dbus/subscription_set.go
@@ -0,0 +1,57 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dbus
+
+import (
+	"time"
+)
+
+// SubscriptionSet returns a subscription set which is like conn.Subscribe but
+// can filter to only return events for a set of units.
+type SubscriptionSet struct {
+	*set
+	conn *Conn
+}
+
+func (s *SubscriptionSet) filter(unit string) bool {
+	return !s.Contains(unit)
+}
+
+// Subscribe starts listening for dbus events for all of the units in the set.
+// Returns channels identical to conn.SubscribeUnits.
+func (s *SubscriptionSet) Subscribe() (<-chan map[string]*UnitStatus, <-chan error) {
+	// TODO: Make fully evented by using systemd 209 with properties changed values
+	return s.conn.SubscribeUnitsCustom(time.Second, 0,
+		mismatchUnitStatus,
+		func(unit string) bool { return s.filter(unit) },
+	)
+}
+
+// NewSubscriptionSet returns a new subscription set.
+func (conn *Conn) NewSubscriptionSet() *SubscriptionSet {
+	return &SubscriptionSet{newSet(), conn}
+}
+
+// mismatchUnitStatus returns true if the provided UnitStatus objects
+// are not equivalent. false is returned if the objects are equivalent.
+// Only the Name, Description and state-related fields are used in
+// the comparison.
+func mismatchUnitStatus(u1, u2 *UnitStatus) bool {
+	return u1.Name != u2.Name ||
+		u1.Description != u2.Description ||
+		u1.LoadState != u2.LoadState ||
+		u1.ActiveState != u2.ActiveState ||
+		u1.SubState != u2.SubState
+}
diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go
index 6c3f5b9..7f43499 100644
--- a/vendor/github.com/coreos/go-systemd/journal/journal.go
+++ b/vendor/github.com/coreos/go-systemd/journal/journal.go
@@ -90,6 +90,7 @@
 		if err != nil {
 			return journalError(err.Error())
 		}
+		defer file.Close()
 		_, err = io.Copy(file, data)
 		if err != nil {
 			return journalError(err.Error())
diff --git a/vendor/github.com/dmcgowan/go-tar/LICENSE b/vendor/github.com/dmcgowan/go-tar/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/vendor/github.com/dmcgowan/go-tar/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/dmcgowan/go-tar/common.go b/vendor/github.com/dmcgowan/go-tar/common.go
new file mode 100644
index 0000000..d2ae66d
--- /dev/null
+++ b/vendor/github.com/dmcgowan/go-tar/common.go
@@ -0,0 +1,286 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package tar implements access to tar archives.
+// It aims to cover most of the variations, including those produced
+// by GNU and BSD tars.
+//
+// References:
+//   http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5
+//   http://www.gnu.org/software/tar/manual/html_node/Standard.html
+//   http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html
+package tar
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path"
+	"time"
+)
+
+// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit
+// architectures. If a large value is encountered when decoding, the result
+// stored in Header will be the truncated version.
+
+// Header type flags.
+const (
+	TypeReg           = '0'    // regular file
+	TypeRegA          = '\x00' // regular file
+	TypeLink          = '1'    // hard link
+	TypeSymlink       = '2'    // symbolic link
+	TypeChar          = '3'    // character device node
+	TypeBlock         = '4'    // block device node
+	TypeDir           = '5'    // directory
+	TypeFifo          = '6'    // fifo node
+	TypeCont          = '7'    // reserved
+	TypeXHeader       = 'x'    // extended header
+	TypeXGlobalHeader = 'g'    // global extended header
+	TypeGNULongName   = 'L'    // Next file has a long name
+	TypeGNULongLink   = 'K'    // Next file symlinks to a file w/ a long name
+	TypeGNUSparse     = 'S'    // sparse file
+)
+
+// A Header represents a single header in a tar archive.
+// Some fields may not be populated.
+type Header struct {
+	Name       string    // name of header file entry
+	Mode       int64     // permission and mode bits
+	Uid        int       // user id of owner
+	Gid        int       // group id of owner
+	Size       int64     // length in bytes
+	ModTime    time.Time // modified time
+	Typeflag   byte      // type of header entry
+	Linkname   string    // target name of link
+	Uname      string    // user name of owner
+	Gname      string    // group name of owner
+	Devmajor   int64     // major number of character or block device
+	Devminor   int64     // minor number of character or block device
+	AccessTime time.Time // access time
+	ChangeTime time.Time // status change time
+	Xattrs     map[string]string
+}
+
+// FileInfo returns an os.FileInfo for the Header.
+func (h *Header) FileInfo() os.FileInfo {
+	return headerFileInfo{h}
+}
+
+// headerFileInfo implements os.FileInfo.
+type headerFileInfo struct {
+	h *Header
+}
+
+func (fi headerFileInfo) Size() int64        { return fi.h.Size }
+func (fi headerFileInfo) IsDir() bool        { return fi.Mode().IsDir() }
+func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
+func (fi headerFileInfo) Sys() interface{}   { return fi.h }
+
+// Name returns the base name of the file.
+func (fi headerFileInfo) Name() string {
+	if fi.IsDir() {
+		return path.Base(path.Clean(fi.h.Name))
+	}
+	return path.Base(fi.h.Name)
+}
+
+// Mode returns the permission and mode bits for the headerFileInfo.
+func (fi headerFileInfo) Mode() (mode os.FileMode) {
+	// Set file permission bits.
+	mode = os.FileMode(fi.h.Mode).Perm()
+
+	// Set setuid, setgid and sticky bits.
+	if fi.h.Mode&c_ISUID != 0 {
+		// setuid
+		mode |= os.ModeSetuid
+	}
+	if fi.h.Mode&c_ISGID != 0 {
+		// setgid
+		mode |= os.ModeSetgid
+	}
+	if fi.h.Mode&c_ISVTX != 0 {
+		// sticky
+		mode |= os.ModeSticky
+	}
+
+	// Set file mode bits.
+	// clear perm, setuid, setgid and sticky bits.
+	m := os.FileMode(fi.h.Mode) &^ 07777
+	if m == c_ISDIR {
+		// directory
+		mode |= os.ModeDir
+	}
+	if m == c_ISFIFO {
+		// named pipe (FIFO)
+		mode |= os.ModeNamedPipe
+	}
+	if m == c_ISLNK {
+		// symbolic link
+		mode |= os.ModeSymlink
+	}
+	if m == c_ISBLK {
+		// device file
+		mode |= os.ModeDevice
+	}
+	if m == c_ISCHR {
+		// Unix character device
+		mode |= os.ModeDevice
+		mode |= os.ModeCharDevice
+	}
+	if m == c_ISSOCK {
+		// Unix domain socket
+		mode |= os.ModeSocket
+	}
+
+	switch fi.h.Typeflag {
+	case TypeSymlink:
+		// symbolic link
+		mode |= os.ModeSymlink
+	case TypeChar:
+		// character device node
+		mode |= os.ModeDevice
+		mode |= os.ModeCharDevice
+	case TypeBlock:
+		// block device node
+		mode |= os.ModeDevice
+	case TypeDir:
+		// directory
+		mode |= os.ModeDir
+	case TypeFifo:
+		// fifo node
+		mode |= os.ModeNamedPipe
+	}
+
+	return mode
+}
+
+// sysStat, if non-nil, populates h from system-dependent fields of fi.
+var sysStat func(fi os.FileInfo, h *Header) error
+
+// Mode constants from the tar spec.
+const (
+	c_ISUID  = 04000   // Set uid
+	c_ISGID  = 02000   // Set gid
+	c_ISVTX  = 01000   // Save text (sticky bit)
+	c_ISDIR  = 040000  // Directory
+	c_ISFIFO = 010000  // FIFO
+	c_ISREG  = 0100000 // Regular file
+	c_ISLNK  = 0120000 // Symbolic link
+	c_ISBLK  = 060000  // Block special file
+	c_ISCHR  = 020000  // Character special file
+	c_ISSOCK = 0140000 // Socket
+)
+
+// Keywords for the PAX Extended Header
+const (
+	paxAtime    = "atime"
+	paxCharset  = "charset"
+	paxComment  = "comment"
+	paxCtime    = "ctime" // please note that ctime is not a valid pax header.
+	paxGid      = "gid"
+	paxGname    = "gname"
+	paxLinkpath = "linkpath"
+	paxMtime    = "mtime"
+	paxPath     = "path"
+	paxSize     = "size"
+	paxUid      = "uid"
+	paxUname    = "uname"
+	paxXattr    = "SCHILY.xattr."
+	paxNone     = ""
+)
+
+// FileInfoHeader creates a partially-populated Header from fi.
+// If fi describes a symlink, FileInfoHeader records link as the link target.
+// If fi describes a directory, a slash is appended to the name.
+// Because os.FileInfo's Name method returns only the base name of
+// the file it describes, it may be necessary to modify the Name field
+// of the returned header to provide the full path name of the file.
+func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
+	if fi == nil {
+		return nil, errors.New("tar: FileInfo is nil")
+	}
+	fm := fi.Mode()
+	h := &Header{
+		Name:    fi.Name(),
+		ModTime: fi.ModTime(),
+		Mode:    int64(fm.Perm()), // or'd with c_IS* constants later
+	}
+	switch {
+	case fm.IsRegular():
+		h.Mode |= c_ISREG
+		h.Typeflag = TypeReg
+		h.Size = fi.Size()
+	case fi.IsDir():
+		h.Typeflag = TypeDir
+		h.Mode |= c_ISDIR
+		h.Name += "/"
+	case fm&os.ModeSymlink != 0:
+		h.Typeflag = TypeSymlink
+		h.Mode |= c_ISLNK
+		h.Linkname = link
+	case fm&os.ModeDevice != 0:
+		if fm&os.ModeCharDevice != 0 {
+			h.Mode |= c_ISCHR
+			h.Typeflag = TypeChar
+		} else {
+			h.Mode |= c_ISBLK
+			h.Typeflag = TypeBlock
+		}
+	case fm&os.ModeNamedPipe != 0:
+		h.Typeflag = TypeFifo
+		h.Mode |= c_ISFIFO
+	case fm&os.ModeSocket != 0:
+		h.Mode |= c_ISSOCK
+	default:
+		return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm)
+	}
+	if fm&os.ModeSetuid != 0 {
+		h.Mode |= c_ISUID
+	}
+	if fm&os.ModeSetgid != 0 {
+		h.Mode |= c_ISGID
+	}
+	if fm&os.ModeSticky != 0 {
+		h.Mode |= c_ISVTX
+	}
+	// If possible, populate additional fields from OS-specific
+	// FileInfo fields.
+	if sys, ok := fi.Sys().(*Header); ok {
+		// This FileInfo came from a Header (not the OS). Use the
+		// original Header to populate all remaining fields.
+		h.Uid = sys.Uid
+		h.Gid = sys.Gid
+		h.Uname = sys.Uname
+		h.Gname = sys.Gname
+		h.AccessTime = sys.AccessTime
+		h.ChangeTime = sys.ChangeTime
+		if sys.Xattrs != nil {
+			h.Xattrs = make(map[string]string)
+			for k, v := range sys.Xattrs {
+				h.Xattrs[k] = v
+			}
+		}
+		if sys.Typeflag == TypeLink {
+			// hard link
+			h.Typeflag = TypeLink
+			h.Size = 0
+			h.Linkname = sys.Linkname
+		}
+	}
+	if sysStat != nil {
+		return h, sysStat(fi, h)
+	}
+	return h, nil
+}
+
+// isHeaderOnlyType checks if the given type flag is of the type that has no
+// data section even if a size is specified.
+func isHeaderOnlyType(flag byte) bool {
+	switch flag {
+	case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/dmcgowan/go-tar/format.go b/vendor/github.com/dmcgowan/go-tar/format.go
new file mode 100644
index 0000000..c2c9910
--- /dev/null
+++ b/vendor/github.com/dmcgowan/go-tar/format.go
@@ -0,0 +1,197 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// Constants to identify various tar formats.
+const (
+	// The format is unknown.
+	formatUnknown = (1 << iota) / 2 // Sequence of 0, 1, 2, 4, 8, etc...
+
+	// The format of the original Unix V7 tar tool prior to standardization.
+	formatV7
+
+	// The old and new GNU formats, which are incompatible with USTAR.
+	// This does cover the old GNU sparse extension.
+	// This does not cover the GNU sparse extensions using PAX headers,
+	// versions 0.0, 0.1, and 1.0; these fall under the PAX format.
+	formatGNU
+
+	// Schily's tar format, which is incompatible with USTAR.
+	// This does not cover STAR extensions to the PAX format; these fall under
+	// the PAX format.
+	formatSTAR
+
+	// USTAR is the former standardization of tar defined in POSIX.1-1988.
+	// This is incompatible with the GNU and STAR formats.
+	formatUSTAR
+
+	// PAX is the latest standardization of tar defined in POSIX.1-2001.
+	// This is an extension of USTAR and is "backwards compatible" with it.
+	//
+	// Some newer formats add their own extensions to PAX, such as GNU sparse
+	// files and SCHILY extended attributes. Since they are backwards compatible
+	// with PAX, they will be labelled as "PAX".
+	formatPAX
+)
+
+// Magics used to identify various formats.
+const (
+	magicGNU, versionGNU     = "ustar ", " \x00"
+	magicUSTAR, versionUSTAR = "ustar\x00", "00"
+	trailerSTAR              = "tar\x00"
+)
+
+// Size constants from various tar specifications.
+const (
+	blockSize  = 512 // Size of each block in a tar stream
+	nameSize   = 100 // Max length of the name field in USTAR format
+	prefixSize = 155 // Max length of the prefix field in USTAR format
+)
+
+var zeroBlock block
+
+type block [blockSize]byte
+
+// Convert block to any number of formats.
+func (b *block) V7() *headerV7       { return (*headerV7)(b) }
+func (b *block) GNU() *headerGNU     { return (*headerGNU)(b) }
+func (b *block) STAR() *headerSTAR   { return (*headerSTAR)(b) }
+func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) }
+func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) }
+
+// GetFormat checks that the block is a valid tar header based on the checksum.
+// It then attempts to guess the specific format based on magic values.
+// If the checksum fails, then formatUnknown is returned.
+func (b *block) GetFormat() (format int) {
+	// Verify checksum.
+	var p parser
+	value := p.parseOctal(b.V7().Chksum())
+	chksum1, chksum2 := b.ComputeChecksum()
+	if p.err != nil || (value != chksum1 && value != chksum2) {
+		return formatUnknown
+	}
+
+	// Guess the magic values.
+	magic := string(b.USTAR().Magic())
+	version := string(b.USTAR().Version())
+	trailer := string(b.STAR().Trailer())
+	switch {
+	case magic == magicUSTAR && trailer == trailerSTAR:
+		return formatSTAR
+	case magic == magicUSTAR:
+		return formatUSTAR
+	case magic == magicGNU && version == versionGNU:
+		return formatGNU
+	default:
+		return formatV7
+	}
+}
+
+// SetFormat writes the magic values necessary for the specified format
+// and then updates the checksum accordingly.
+func (b *block) SetFormat(format int) {
+	// Set the magic values.
+	switch format {
+	case formatV7:
+		// Do nothing.
+	case formatGNU:
+		copy(b.GNU().Magic(), magicGNU)
+		copy(b.GNU().Version(), versionGNU)
+	case formatSTAR:
+		copy(b.STAR().Magic(), magicUSTAR)
+		copy(b.STAR().Version(), versionUSTAR)
+		copy(b.STAR().Trailer(), trailerSTAR)
+	case formatUSTAR, formatPAX:
+		copy(b.USTAR().Magic(), magicUSTAR)
+		copy(b.USTAR().Version(), versionUSTAR)
+	default:
+		panic("invalid format")
+	}
+
+	// Update checksum.
+	// This field is special in that it is terminated by a NULL then space.
+	var f formatter
+	field := b.V7().Chksum()
+	chksum, _ := b.ComputeChecksum() // Possible values are 256..128776
+	f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143
+	field[7] = ' '
+}
+
+// ComputeChecksum computes the checksum for the header block.
+// POSIX specifies a sum of the unsigned byte values, but the Sun tar used
+// signed byte values.
+// We compute and return both.
+func (b *block) ComputeChecksum() (unsigned, signed int64) {
+	for i, c := range b {
+		if 148 <= i && i < 156 {
+			c = ' ' // Treat the checksum field itself as all spaces.
+		}
+		unsigned += int64(uint8(c))
+		signed += int64(int8(c))
+	}
+	return unsigned, signed
+}
+
+type headerV7 [blockSize]byte
+
+func (h *headerV7) Name() []byte     { return h[000:][:100] }
+func (h *headerV7) Mode() []byte     { return h[100:][:8] }
+func (h *headerV7) UID() []byte      { return h[108:][:8] }
+func (h *headerV7) GID() []byte      { return h[116:][:8] }
+func (h *headerV7) Size() []byte     { return h[124:][:12] }
+func (h *headerV7) ModTime() []byte  { return h[136:][:12] }
+func (h *headerV7) Chksum() []byte   { return h[148:][:8] }
+func (h *headerV7) TypeFlag() []byte { return h[156:][:1] }
+func (h *headerV7) LinkName() []byte { return h[157:][:100] }
+
+type headerGNU [blockSize]byte
+
+func (h *headerGNU) V7() *headerV7       { return (*headerV7)(h) }
+func (h *headerGNU) Magic() []byte       { return h[257:][:6] }
+func (h *headerGNU) Version() []byte     { return h[263:][:2] }
+func (h *headerGNU) UserName() []byte    { return h[265:][:32] }
+func (h *headerGNU) GroupName() []byte   { return h[297:][:32] }
+func (h *headerGNU) DevMajor() []byte    { return h[329:][:8] }
+func (h *headerGNU) DevMinor() []byte    { return h[337:][:8] }
+func (h *headerGNU) AccessTime() []byte  { return h[345:][:12] }
+func (h *headerGNU) ChangeTime() []byte  { return h[357:][:12] }
+func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) }
+func (h *headerGNU) RealSize() []byte    { return h[483:][:12] }
+
+type headerSTAR [blockSize]byte
+
+func (h *headerSTAR) V7() *headerV7      { return (*headerV7)(h) }
+func (h *headerSTAR) Magic() []byte      { return h[257:][:6] }
+func (h *headerSTAR) Version() []byte    { return h[263:][:2] }
+func (h *headerSTAR) UserName() []byte   { return h[265:][:32] }
+func (h *headerSTAR) GroupName() []byte  { return h[297:][:32] }
+func (h *headerSTAR) DevMajor() []byte   { return h[329:][:8] }
+func (h *headerSTAR) DevMinor() []byte   { return h[337:][:8] }
+func (h *headerSTAR) Prefix() []byte     { return h[345:][:131] }
+func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] }
+func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] }
+func (h *headerSTAR) Trailer() []byte    { return h[508:][:4] }
+
+type headerUSTAR [blockSize]byte
+
+func (h *headerUSTAR) V7() *headerV7     { return (*headerV7)(h) }
+func (h *headerUSTAR) Magic() []byte     { return h[257:][:6] }
+func (h *headerUSTAR) Version() []byte   { return h[263:][:2] }
+func (h *headerUSTAR) UserName() []byte  { return h[265:][:32] }
+func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] }
+func (h *headerUSTAR) DevMajor() []byte  { return h[329:][:8] }
+func (h *headerUSTAR) DevMinor() []byte  { return h[337:][:8] }
+func (h *headerUSTAR) Prefix() []byte    { return h[345:][:155] }
+
+type sparseArray []byte
+
+func (s sparseArray) Entry(i int) sparseNode { return (sparseNode)(s[i*24:]) }
+func (s sparseArray) IsExtended() []byte     { return s[24*s.MaxEntries():][:1] }
+func (s sparseArray) MaxEntries() int        { return len(s) / 24 }
+
+type sparseNode []byte
+
+func (s sparseNode) Offset() []byte   { return s[00:][:12] }
+func (s sparseNode) NumBytes() []byte { return s[12:][:12] }
diff --git a/vendor/github.com/dmcgowan/go-tar/reader.go b/vendor/github.com/dmcgowan/go-tar/reader.go
new file mode 100644
index 0000000..a6142c6
--- /dev/null
+++ b/vendor/github.com/dmcgowan/go-tar/reader.go
@@ -0,0 +1,800 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+//   - pax extensions
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"io/ioutil"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var (
+	ErrHeader = errors.New("archive/tar: invalid tar header")
+)
+
+// A Reader provides sequential access to the contents of a tar archive.
+// A tar archive consists of a sequence of files.
+// The Next method advances to the next file in the archive (including the first),
+// and then it can be treated as an io.Reader to access the file's data.
+type Reader struct {
+	r    io.Reader
+	pad  int64          // amount of padding (ignored) after current file entry
+	curr numBytesReader // reader for current file entry
+	blk  block          // buffer to use as temporary local storage
+
+	// err is a persistent error.
+	// It is only the responsibility of every exported method of Reader to
+	// ensure that this error is sticky.
+	err error
+}
+
+// A numBytesReader is an io.Reader with a numBytes method, returning the number
+// of bytes remaining in the underlying encoded data.
+type numBytesReader interface {
+	io.Reader
+	numBytes() int64
+}
+
+// A regFileReader is a numBytesReader for reading file data from a tar archive.
+type regFileReader struct {
+	r  io.Reader // underlying reader
+	nb int64     // number of unread bytes for current file entry
+}
+
+// A sparseFileReader is a numBytesReader for reading sparse file data from a
+// tar archive.
+type sparseFileReader struct {
+	rfr   numBytesReader // Reads the sparse-encoded file data
+	sp    []sparseEntry  // The sparse map for the file
+	pos   int64          // Keeps track of file position
+	total int64          // Total size of the file
+}
+
+// A sparseEntry holds a single entry in a sparse file's sparse map.
+//
+// Sparse files are represented using a series of sparseEntry structs.
+// Despite the name, a sparseEntry represents an actual data fragment that
+// references data found in the underlying archive stream. All regions not
+// covered by a sparseEntry are logically filled with zeros.
+//
+// For example, if the underlying raw file contains the 10-byte data:
+//	var compactData = "abcdefgh"
+//
+// And the sparse map has the following entries:
+//	var sp = []sparseEntry{
+//		{offset: 2,  numBytes: 5} // Data fragment for [2..7]
+//		{offset: 18, numBytes: 3} // Data fragment for [18..21]
+//	}
+//
+// Then the content of the resulting sparse file with a "real" size of 25 is:
+//	var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
+type sparseEntry struct {
+	offset   int64 // Starting position of the fragment
+	numBytes int64 // Length of the fragment
+}
+
+// Keywords for GNU sparse files in a PAX extended header
+const (
+	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
+	paxGNUSparseOffset    = "GNU.sparse.offset"
+	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
+	paxGNUSparseMap       = "GNU.sparse.map"
+	paxGNUSparseName      = "GNU.sparse.name"
+	paxGNUSparseMajor     = "GNU.sparse.major"
+	paxGNUSparseMinor     = "GNU.sparse.minor"
+	paxGNUSparseSize      = "GNU.sparse.size"
+	paxGNUSparseRealSize  = "GNU.sparse.realsize"
+)
+
+// NewReader creates a new Reader reading from r.
+func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
+
+// Next advances to the next entry in the tar archive.
+//
+// io.EOF is returned at the end of the input.
+func (tr *Reader) Next() (*Header, error) {
+	if tr.err != nil {
+		return nil, tr.err
+	}
+	hdr, err := tr.next()
+	tr.err = err
+	return hdr, err
+}
+
+func (tr *Reader) next() (*Header, error) {
+	var extHdrs map[string]string
+
+	// Externally, Next iterates through the tar archive as if it is a series of
+	// files. Internally, the tar format often uses fake "files" to add meta
+	// data that describes the next file. These meta data "files" should not
+	// normally be visible to the outside. As such, this loop iterates through
+	// one or more "header files" until it finds a "normal file".
+loop:
+	for {
+		if err := tr.skipUnread(); err != nil {
+			return nil, err
+		}
+		hdr, rawHdr, err := tr.readHeader()
+		if err != nil {
+			return nil, err
+		}
+		if err := tr.handleRegularFile(hdr); err != nil {
+			return nil, err
+		}
+
+		// Check for PAX/GNU special headers and files.
+		switch hdr.Typeflag {
+		case TypeXHeader:
+			extHdrs, err = parsePAX(tr)
+			if err != nil {
+				return nil, err
+			}
+			continue loop // This is a meta header affecting the next header
+		case TypeGNULongName, TypeGNULongLink:
+			realname, err := ioutil.ReadAll(tr)
+			if err != nil {
+				return nil, err
+			}
+
+			// Convert GNU extensions to use PAX headers.
+			if extHdrs == nil {
+				extHdrs = make(map[string]string)
+			}
+			var p parser
+			switch hdr.Typeflag {
+			case TypeGNULongName:
+				extHdrs[paxPath] = p.parseString(realname)
+			case TypeGNULongLink:
+				extHdrs[paxLinkpath] = p.parseString(realname)
+			}
+			if p.err != nil {
+				return nil, p.err
+			}
+			continue loop // This is a meta header affecting the next header
+		default:
+			// The old GNU sparse format is handled here since it is technically
+			// just a regular file with additional attributes.
+
+			if err := mergePAX(hdr, extHdrs); err != nil {
+				return nil, err
+			}
+
+			// The extended headers may have updated the size.
+			// Thus, setup the regFileReader again after merging PAX headers.
+			if err := tr.handleRegularFile(hdr); err != nil {
+				return nil, err
+			}
+
+			// Sparse formats rely on being able to read from the logical data
+			// section; there must be a preceding call to handleRegularFile.
+			if err := tr.handleSparseFile(hdr, rawHdr, extHdrs); err != nil {
+				return nil, err
+			}
+			return hdr, nil // This is a file, so stop
+		}
+	}
+}
+
+// handleRegularFile sets up the current file reader and padding such that it
+// can only read the following logical data section. It will properly handle
+// special headers that contain no data section.
+func (tr *Reader) handleRegularFile(hdr *Header) error {
+	nb := hdr.Size
+	if isHeaderOnlyType(hdr.Typeflag) {
+		nb = 0
+	}
+	if nb < 0 {
+		return ErrHeader
+	}
+
+	tr.pad = -nb & (blockSize - 1) // blockSize is a power of two
+	tr.curr = &regFileReader{r: tr.r, nb: nb}
+	return nil
+}
+
+// handleSparseFile checks if the current file is a sparse format of any type
+// and sets the curr reader appropriately.
+func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block, extHdrs map[string]string) error {
+	var sp []sparseEntry
+	var err error
+	if hdr.Typeflag == TypeGNUSparse {
+		sp, err = tr.readOldGNUSparseMap(hdr, rawHdr)
+		if err != nil {
+			return err
+		}
+	} else {
+		sp, err = tr.checkForGNUSparsePAXHeaders(hdr, extHdrs)
+		if err != nil {
+			return err
+		}
+	}
+
+	// If sp is non-nil, then this is a sparse file.
+	// Note that it is possible for len(sp) to be zero.
+	if sp != nil {
+		tr.curr, err = newSparseFileReader(tr.curr, sp, hdr.Size)
+	}
+	return err
+}
+
+// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then
+// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to
+// be treated as a regular file.
+func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) {
+	var sparseFormat string
+
+	// Check for sparse format indicators
+	major, majorOk := headers[paxGNUSparseMajor]
+	minor, minorOk := headers[paxGNUSparseMinor]
+	sparseName, sparseNameOk := headers[paxGNUSparseName]
+	_, sparseMapOk := headers[paxGNUSparseMap]
+	sparseSize, sparseSizeOk := headers[paxGNUSparseSize]
+	sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize]
+
+	// Identify which, if any, sparse format applies from which PAX headers are set
+	if majorOk && minorOk {
+		sparseFormat = major + "." + minor
+	} else if sparseNameOk && sparseMapOk {
+		sparseFormat = "0.1"
+	} else if sparseSizeOk {
+		sparseFormat = "0.0"
+	} else {
+		// Not a PAX format GNU sparse file.
+		return nil, nil
+	}
+
+	// Check for unknown sparse format
+	if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" {
+		return nil, nil
+	}
+
+	// Update hdr from GNU sparse PAX headers
+	if sparseNameOk {
+		hdr.Name = sparseName
+	}
+	if sparseSizeOk {
+		realSize, err := strconv.ParseInt(sparseSize, 10, 64)
+		if err != nil {
+			return nil, ErrHeader
+		}
+		hdr.Size = realSize
+	} else if sparseRealSizeOk {
+		realSize, err := strconv.ParseInt(sparseRealSize, 10, 64)
+		if err != nil {
+			return nil, ErrHeader
+		}
+		hdr.Size = realSize
+	}
+
+	// Set up the sparse map, according to the particular sparse format in use
+	var sp []sparseEntry
+	var err error
+	switch sparseFormat {
+	case "0.0", "0.1":
+		sp, err = readGNUSparseMap0x1(headers)
+	case "1.0":
+		sp, err = readGNUSparseMap1x0(tr.curr)
+	}
+	return sp, err
+}
+
+// mergePAX merges well known headers according to PAX standard.
+// In general headers with the same name as those found
+// in the header struct overwrite those found in the header
+// struct with higher precision or longer values. Esp. useful
+// for name and linkname fields.
+func mergePAX(hdr *Header, headers map[string]string) (err error) {
+	var id64 int64
+	for k, v := range headers {
+		switch k {
+		case paxPath:
+			hdr.Name = v
+		case paxLinkpath:
+			hdr.Linkname = v
+		case paxUname:
+			hdr.Uname = v
+		case paxGname:
+			hdr.Gname = v
+		case paxUid:
+			id64, err = strconv.ParseInt(v, 10, 64)
+			hdr.Uid = int(id64) // Integer overflow possible
+		case paxGid:
+			id64, err = strconv.ParseInt(v, 10, 64)
+			hdr.Gid = int(id64) // Integer overflow possible
+		case paxAtime:
+			hdr.AccessTime, err = parsePAXTime(v)
+		case paxMtime:
+			hdr.ModTime, err = parsePAXTime(v)
+		case paxCtime:
+			hdr.ChangeTime, err = parsePAXTime(v)
+		case paxSize:
+			hdr.Size, err = strconv.ParseInt(v, 10, 64)
+		default:
+			if strings.HasPrefix(k, paxXattr) {
+				if hdr.Xattrs == nil {
+					hdr.Xattrs = make(map[string]string)
+				}
+				hdr.Xattrs[k[len(paxXattr):]] = v
+			}
+		}
+		if err != nil {
+			return ErrHeader
+		}
+	}
+	return nil
+}
+
+// parsePAX parses PAX headers.
+// If an extended header (type 'x') is invalid, ErrHeader is returned
+func parsePAX(r io.Reader) (map[string]string, error) {
+	buf, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+	sbuf := string(buf)
+
+	// For GNU PAX sparse format 0.0 support.
+	// This function transforms the sparse format 0.0 headers into format 0.1
+	// headers since 0.0 headers were not PAX compliant.
+	var sparseMap []string
+
+	extHdrs := make(map[string]string)
+	for len(sbuf) > 0 {
+		key, value, residual, err := parsePAXRecord(sbuf)
+		if err != nil {
+			return nil, ErrHeader
+		}
+		sbuf = residual
+
+		switch key {
+		case paxGNUSparseOffset, paxGNUSparseNumBytes:
+			// Validate sparse header order and value.
+			if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) ||
+				(len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) ||
+				strings.Contains(value, ",") {
+				return nil, ErrHeader
+			}
+			sparseMap = append(sparseMap, value)
+		default:
+			// According to PAX specification, a value is stored only if it is
+			// non-empty. Otherwise, the key is deleted.
+			if len(value) > 0 {
+				extHdrs[key] = value
+			} else {
+				delete(extHdrs, key)
+			}
+		}
+	}
+	if len(sparseMap) > 0 {
+		extHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
+	}
+	return extHdrs, nil
+}
+
+// skipUnread skips any unread bytes in the existing file entry, as well as any
+// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is
+// encountered in the data portion; it is okay to hit io.EOF in the padding.
+//
+// Note that this function still works properly even when sparse files are being
+// used since numBytes returns the bytes remaining in the underlying io.Reader.
+func (tr *Reader) skipUnread() error {
+	dataSkip := tr.numBytes()      // Number of data bytes to skip
+	totalSkip := dataSkip + tr.pad // Total number of bytes to skip
+	tr.curr, tr.pad = nil, 0
+
+	// If possible, Seek to the last byte before the end of the data section.
+	// Do this because Seek is often lazy about reporting errors; this will mask
+	// the fact that the tar stream may be truncated. We can rely on the
+	// io.CopyN done shortly afterwards to trigger any IO errors.
+	var seekSkipped int64 // Number of bytes skipped via Seek
+	if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 {
+		// Not all io.Seeker can actually Seek. For example, os.Stdin implements
+		// io.Seeker, but calling Seek always returns an error and performs
+		// no action. Thus, we try an innocent seek to the current position
+		// to see if Seek is really supported.
+		pos1, err := sr.Seek(0, io.SeekCurrent)
+		if err == nil {
+			// Seek seems supported, so perform the real Seek.
+			pos2, err := sr.Seek(dataSkip-1, io.SeekCurrent)
+			if err != nil {
+				return err
+			}
+			seekSkipped = pos2 - pos1
+		}
+	}
+
+	copySkipped, err := io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped)
+	if err == io.EOF && seekSkipped+copySkipped < dataSkip {
+		err = io.ErrUnexpectedEOF
+	}
+	return err
+}
+
+// readHeader reads the next block header and assumes that the underlying reader
+// is already aligned to a block boundary. It returns the raw block of the
+// header in case further processing is required.
+//
+// The err will be set to io.EOF only when one of the following occurs:
+//	* Exactly 0 bytes are read and EOF is hit.
+//	* Exactly 1 block of zeros is read and EOF is hit.
+//	* At least 2 blocks of zeros are read.
+func (tr *Reader) readHeader() (*Header, *block, error) {
+	// Two blocks of zero bytes marks the end of the archive.
+	if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
+		return nil, nil, err // EOF is okay here; exactly 0 bytes read
+	}
+	if bytes.Equal(tr.blk[:], zeroBlock[:]) {
+		if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
+			return nil, nil, err // EOF is okay here; exactly 1 block of zeros read
+		}
+		if bytes.Equal(tr.blk[:], zeroBlock[:]) {
+			return nil, nil, io.EOF // normal EOF; exactly 2 blocks of zeros read
+		}
+		return nil, nil, ErrHeader // Zero block and then non-zero block
+	}
+
+	// Verify the header matches a known format.
+	format := tr.blk.GetFormat()
+	if format == formatUnknown {
+		return nil, nil, ErrHeader
+	}
+
+	var p parser
+	hdr := new(Header)
+
+	// Unpack the V7 header.
+	v7 := tr.blk.V7()
+	hdr.Name = p.parseString(v7.Name())
+	hdr.Mode = p.parseNumeric(v7.Mode())
+	hdr.Uid = int(p.parseNumeric(v7.UID()))
+	hdr.Gid = int(p.parseNumeric(v7.GID()))
+	hdr.Size = p.parseNumeric(v7.Size())
+	hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0)
+	hdr.Typeflag = v7.TypeFlag()[0]
+	hdr.Linkname = p.parseString(v7.LinkName())
+
+	// Unpack format specific fields.
+	if format > formatV7 {
+		ustar := tr.blk.USTAR()
+		hdr.Uname = p.parseString(ustar.UserName())
+		hdr.Gname = p.parseString(ustar.GroupName())
+		if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock {
+			hdr.Devmajor = p.parseNumeric(ustar.DevMajor())
+			hdr.Devminor = p.parseNumeric(ustar.DevMinor())
+		}
+
+		var prefix string
+		switch format {
+		case formatUSTAR, formatGNU:
+			// TODO(dsnet): Do not use the prefix field for the GNU format!
+			// See golang.org/issues/12594
+			ustar := tr.blk.USTAR()
+			prefix = p.parseString(ustar.Prefix())
+		case formatSTAR:
+			star := tr.blk.STAR()
+			prefix = p.parseString(star.Prefix())
+			hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0)
+			hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0)
+		}
+		if len(prefix) > 0 {
+			hdr.Name = prefix + "/" + hdr.Name
+		}
+	}
+	return hdr, &tr.blk, p.err
+}
+
+// readOldGNUSparseMap reads the sparse map from the old GNU sparse format.
+// The sparse map is stored in the tar header if it's small enough.
+// If it's larger than four entries, then one or more extension headers are used
+// to store the rest of the sparse map.
+//
+// The Header.Size does not reflect the size of any extended headers used.
+// Thus, this function will read from the raw io.Reader to fetch extra headers.
+// This method mutates blk in the process.
+func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, error) {
+	// Make sure that the input format is GNU.
+	// Unfortunately, the STAR format also has a sparse header format that uses
+	// the same type flag but has a completely different layout.
+	if blk.GetFormat() != formatGNU {
+		return nil, ErrHeader
+	}
+
+	var p parser
+	hdr.Size = p.parseNumeric(blk.GNU().RealSize())
+	if p.err != nil {
+		return nil, p.err
+	}
+	var s sparseArray = blk.GNU().Sparse()
+	var sp = make([]sparseEntry, 0, s.MaxEntries())
+	for {
+		for i := 0; i < s.MaxEntries(); i++ {
+			// This termination condition is identical to GNU and BSD tar.
+			if s.Entry(i).Offset()[0] == 0x00 {
+				break // Don't return, need to process extended headers (even if empty)
+			}
+			offset := p.parseNumeric(s.Entry(i).Offset())
+			numBytes := p.parseNumeric(s.Entry(i).NumBytes())
+			if p.err != nil {
+				return nil, p.err
+			}
+			sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+		}
+
+		if s.IsExtended()[0] > 0 {
+			// There are more entries. Read an extension header and parse its entries.
+			if _, err := io.ReadFull(tr.r, blk[:]); err != nil {
+				if err == io.EOF {
+					err = io.ErrUnexpectedEOF
+				}
+				return nil, err
+			}
+			s = blk.Sparse()
+			continue
+		}
+		return sp, nil // Done
+	}
+}
+
+// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
+// version 1.0. The format of the sparse map consists of a series of
+// newline-terminated numeric fields. The first field is the number of entries
+// and is always present. Following this are the entries, consisting of two
+// fields (offset, numBytes). This function must stop reading at the end
+// boundary of the block containing the last newline.
+//
+// Note that the GNU manual says that numeric values should be encoded in octal
+// format. However, the GNU tar utility itself outputs these values in decimal.
+// As such, this library treats values as being encoded in decimal.
+func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) {
+	var cntNewline int64
+	var buf bytes.Buffer
+	var blk = make([]byte, blockSize)
+
+	// feedTokens copies data in blockSize chunks from r into buf until there are
+	// at least cnt newlines in buf. It will not read more blocks than needed.
+	var feedTokens = func(cnt int64) error {
+		for cntNewline < cnt {
+			if _, err := io.ReadFull(r, blk); err != nil {
+				if err == io.EOF {
+					err = io.ErrUnexpectedEOF
+				}
+				return err
+			}
+			buf.Write(blk)
+			for _, c := range blk {
+				if c == '\n' {
+					cntNewline++
+				}
+			}
+		}
+		return nil
+	}
+
+	// nextToken gets the next token delimited by a newline. This assumes that
+	// at least one newline exists in the buffer.
+	var nextToken = func() string {
+		cntNewline--
+		tok, _ := buf.ReadString('\n')
+		return tok[:len(tok)-1] // Cut off newline
+	}
+
+	// Parse for the number of entries.
+	// Use integer overflow resistant math to check this.
+	if err := feedTokens(1); err != nil {
+		return nil, err
+	}
+	numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
+	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
+		return nil, ErrHeader
+	}
+
+	// Parse for all member entries.
+	// numEntries is trusted after this since a potential attacker must have
+	// committed resources proportional to what this library used.
+	if err := feedTokens(2 * numEntries); err != nil {
+		return nil, err
+	}
+	sp := make([]sparseEntry, 0, numEntries)
+	for i := int64(0); i < numEntries; i++ {
+		offset, err := strconv.ParseInt(nextToken(), 10, 64)
+		if err != nil {
+			return nil, ErrHeader
+		}
+		numBytes, err := strconv.ParseInt(nextToken(), 10, 64)
+		if err != nil {
+			return nil, ErrHeader
+		}
+		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+	}
+	return sp, nil
+}
+
+// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
+// version 0.1. The sparse map is stored in the PAX headers.
+func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) {
+	// Get number of entries.
+	// Use integer overflow resistant math to check this.
+	numEntriesStr := extHdrs[paxGNUSparseNumBlocks]
+	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
+	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
+		return nil, ErrHeader
+	}
+
+	// There should be two numbers in sparseMap for each entry.
+	sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",")
+	if int64(len(sparseMap)) != 2*numEntries {
+		return nil, ErrHeader
+	}
+
+	// Loop through the entries in the sparse map.
+	// numEntries is trusted now.
+	sp := make([]sparseEntry, 0, numEntries)
+	for i := int64(0); i < numEntries; i++ {
+		offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64)
+		if err != nil {
+			return nil, ErrHeader
+		}
+		numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64)
+		if err != nil {
+			return nil, ErrHeader
+		}
+		sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes})
+	}
+	return sp, nil
+}
+
+// numBytes returns the number of bytes left to read in the current file's entry
+// in the tar archive, or 0 if there is no current file.
+func (tr *Reader) numBytes() int64 {
+	if tr.curr == nil {
+		// No current file, so no bytes
+		return 0
+	}
+	return tr.curr.numBytes()
+}
+
+// Read reads from the current entry in the tar archive.
+// It returns 0, io.EOF when it reaches the end of that entry,
+// until Next is called to advance to the next entry.
+//
+// Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
+// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what
+// the Header.Size claims.
+func (tr *Reader) Read(b []byte) (int, error) {
+	if tr.err != nil {
+		return 0, tr.err
+	}
+	if tr.curr == nil {
+		return 0, io.EOF
+	}
+
+	n, err := tr.curr.Read(b)
+	if err != nil && err != io.EOF {
+		tr.err = err
+	}
+	return n, err
+}
+
+func (rfr *regFileReader) Read(b []byte) (n int, err error) {
+	if rfr.nb == 0 {
+		// file consumed
+		return 0, io.EOF
+	}
+	if int64(len(b)) > rfr.nb {
+		b = b[0:rfr.nb]
+	}
+	n, err = rfr.r.Read(b)
+	rfr.nb -= int64(n)
+
+	if err == io.EOF && rfr.nb > 0 {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+// numBytes returns the number of bytes left to read in the file's data in the tar archive.
+func (rfr *regFileReader) numBytes() int64 {
+	return rfr.nb
+}
+
+// newSparseFileReader creates a new sparseFileReader, but validates all of the
+// sparse entries before doing so.
+func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) {
+	if total < 0 {
+		return nil, ErrHeader // Total size cannot be negative
+	}
+
+	// Validate all sparse entries. These are the same checks as performed by
+	// the BSD tar utility.
+	for i, s := range sp {
+		switch {
+		case s.offset < 0 || s.numBytes < 0:
+			return nil, ErrHeader // Negative values are never okay
+		case s.offset > math.MaxInt64-s.numBytes:
+			return nil, ErrHeader // Integer overflow with large length
+		case s.offset+s.numBytes > total:
+			return nil, ErrHeader // Region extends beyond the "real" size
+		case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset:
+			return nil, ErrHeader // Regions can't overlap and must be in order
+		}
+	}
+	return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil
+}
+
+// readHole reads a sparse hole ending at endOffset.
+func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int {
+	n64 := endOffset - sfr.pos
+	if n64 > int64(len(b)) {
+		n64 = int64(len(b))
+	}
+	n := int(n64)
+	for i := 0; i < n; i++ {
+		b[i] = 0
+	}
+	sfr.pos += n64
+	return n
+}
+
+// Read reads the sparse file data in expanded form.
+func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
+	// Skip past all empty fragments.
+	for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 {
+		sfr.sp = sfr.sp[1:]
+	}
+
+	// If there are no more fragments, then it is possible that there
+	// is one last sparse hole.
+	if len(sfr.sp) == 0 {
+		// This behavior matches the BSD tar utility.
+		// However, GNU tar stops returning data even if sfr.total is unmet.
+		if sfr.pos < sfr.total {
+			return sfr.readHole(b, sfr.total), nil
+		}
+		return 0, io.EOF
+	}
+
+	// In front of a data fragment, so read a hole.
+	if sfr.pos < sfr.sp[0].offset {
+		return sfr.readHole(b, sfr.sp[0].offset), nil
+	}
+
+	// In a data fragment, so read from it.
+	// This math is overflow free since we verify that offset and numBytes can
+	// be safely added when creating the sparseFileReader.
+	endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment
+	bytesLeft := endPos - sfr.pos                   // Bytes left in fragment
+	if int64(len(b)) > bytesLeft {
+		b = b[:bytesLeft]
+	}
+
+	n, err = sfr.rfr.Read(b)
+	sfr.pos += int64(n)
+	if err == io.EOF {
+		if sfr.pos < endPos {
+			err = io.ErrUnexpectedEOF // There was supposed to be more data
+		} else if sfr.pos < sfr.total {
+			err = nil // There is still an implicit sparse hole at the end
+		}
+	}
+
+	if sfr.pos == endPos {
+		sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it
+	}
+	return n, err
+}
+
+// numBytes returns the number of bytes left to read in the sparse file's
+// sparse-encoded data in the tar archive.
+func (sfr *sparseFileReader) numBytes() int64 {
+	return sfr.rfr.numBytes()
+}
diff --git a/vendor/github.com/dmcgowan/go-tar/stat_atim.go b/vendor/github.com/dmcgowan/go-tar/stat_atim.go
new file mode 100644
index 0000000..cf9cc79
--- /dev/null
+++ b/vendor/github.com/dmcgowan/go-tar/stat_atim.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux dragonfly openbsd solaris
+
+package tar
+
+import (
+	"syscall"
+	"time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+	return time.Unix(st.Atim.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+	return time.Unix(st.Ctim.Unix())
+}
diff --git a/vendor/github.com/dmcgowan/go-tar/stat_atimespec.go b/vendor/github.com/dmcgowan/go-tar/stat_atimespec.go
new file mode 100644
index 0000000..6f17dbe
--- /dev/null
+++ b/vendor/github.com/dmcgowan/go-tar/stat_atimespec.go
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd netbsd
+
+package tar
+
+import (
+	"syscall"
+	"time"
+)
+
+func statAtime(st *syscall.Stat_t) time.Time {
+	return time.Unix(st.Atimespec.Unix())
+}
+
+func statCtime(st *syscall.Stat_t) time.Time {
+	return time.Unix(st.Ctimespec.Unix())
+}
diff --git a/vendor/github.com/dmcgowan/go-tar/stat_unix.go b/vendor/github.com/dmcgowan/go-tar/stat_unix.go
new file mode 100644
index 0000000..cb843db
--- /dev/null
+++ b/vendor/github.com/dmcgowan/go-tar/stat_unix.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin dragonfly freebsd openbsd netbsd solaris
+
+package tar
+
+import (
+	"os"
+	"syscall"
+)
+
+func init() {
+	sysStat = statUnix
+}
+
+func statUnix(fi os.FileInfo, h *Header) error {
+	sys, ok := fi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return nil
+	}
+	h.Uid = int(sys.Uid)
+	h.Gid = int(sys.Gid)
+	// TODO(bradfitz): populate username & group.  os/user
+	// doesn't cache LookupId lookups, and lacks group
+	// lookup functions.
+	h.AccessTime = statAtime(sys)
+	h.ChangeTime = statCtime(sys)
+	// TODO(bradfitz): major/minor device numbers?
+	return nil
+}
diff --git a/vendor/github.com/dmcgowan/go-tar/strconv.go b/vendor/github.com/dmcgowan/go-tar/strconv.go
new file mode 100644
index 0000000..bb5b51c
--- /dev/null
+++ b/vendor/github.com/dmcgowan/go-tar/strconv.go
@@ -0,0 +1,252 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func isASCII(s string) bool {
+	for _, c := range s {
+		if c >= 0x80 {
+			return false
+		}
+	}
+	return true
+}
+
+func toASCII(s string) string {
+	if isASCII(s) {
+		return s
+	}
+	var buf bytes.Buffer
+	for _, c := range s {
+		if c < 0x80 {
+			buf.WriteByte(byte(c))
+		}
+	}
+	return buf.String()
+}
+
+type parser struct {
+	err error // Last error seen
+}
+
+type formatter struct {
+	err error // Last error seen
+}
+
+// parseString parses bytes as a NUL-terminated C-style string.
+// If a NUL byte is not found then the whole slice is returned as a string.
+func (*parser) parseString(b []byte) string {
+	n := 0
+	for n < len(b) && b[n] != 0 {
+		n++
+	}
+	return string(b[0:n])
+}
+
+// Write s into b, terminating it with a NUL if there is room.
+func (f *formatter) formatString(b []byte, s string) {
+	if len(s) > len(b) {
+		f.err = ErrFieldTooLong
+		return
+	}
+	ascii := toASCII(s)
+	copy(b, ascii)
+	if len(ascii) < len(b) {
+		b[len(ascii)] = 0
+	}
+}
+
+// fitsInBase256 reports whether x can be encoded into n bytes using base-256
+// encoding. Unlike octal encoding, base-256 encoding does not require that the
+// string ends with a NUL character. Thus, all n bytes are available for output.
+//
+// If operating in binary mode, this assumes strict GNU binary mode; which means
+// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
+// equivalent to the sign bit in two's complement form.
+func fitsInBase256(n int, x int64) bool {
+	var binBits = uint(n-1) * 8
+	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
+}
+
+// parseNumeric parses the input as being encoded in either base-256 or octal.
+// This function may return negative numbers.
+// If parsing fails or an integer overflow occurs, err will be set.
+func (p *parser) parseNumeric(b []byte) int64 {
+	// Check for base-256 (binary) format first.
+	// If the first bit is set, then all following bits constitute a two's
+	// complement encoded number in big-endian byte order.
+	if len(b) > 0 && b[0]&0x80 != 0 {
+		// Handling negative numbers relies on the following identity:
+		//	-a-1 == ^a
+		//
+		// If the number is negative, we use an inversion mask to invert the
+		// data bytes and treat the value as an unsigned number.
+		var inv byte // 0x00 if positive or zero, 0xff if negative
+		if b[0]&0x40 != 0 {
+			inv = 0xff
+		}
+
+		var x uint64
+		for i, c := range b {
+			c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing
+			if i == 0 {
+				c &= 0x7f // Ignore signal bit in first byte
+			}
+			if (x >> 56) > 0 {
+				p.err = ErrHeader // Integer overflow
+				return 0
+			}
+			x = x<<8 | uint64(c)
+		}
+		if (x >> 63) > 0 {
+			p.err = ErrHeader // Integer overflow
+			return 0
+		}
+		if inv == 0xff {
+			return ^int64(x)
+		}
+		return int64(x)
+	}
+
+	// Normal case is base-8 (octal) format.
+	return p.parseOctal(b)
+}
+
+// Write x into b, as binary (GNUtar/star extension).
+func (f *formatter) formatNumeric(b []byte, x int64) {
+	if fitsInBase256(len(b), x) {
+		for i := len(b) - 1; i >= 0; i-- {
+			b[i] = byte(x)
+			x >>= 8
+		}
+		b[0] |= 0x80 // Highest bit indicates binary format
+		return
+	}
+
+	f.formatOctal(b, 0) // Last resort, just write zero
+	f.err = ErrFieldTooLong
+}
+
+func (p *parser) parseOctal(b []byte) int64 {
+	// Because unused fields are filled with NULs, we need
+	// to skip leading NULs. Fields may also be padded with
+	// spaces or NULs.
+	// So we remove leading and trailing NULs and spaces to
+	// be sure.
+	b = bytes.Trim(b, " \x00")
+
+	if len(b) == 0 {
+		return 0
+	}
+	x, perr := strconv.ParseUint(p.parseString(b), 8, 64)
+	if perr != nil {
+		p.err = ErrHeader
+	}
+	return int64(x)
+}
+
+func (f *formatter) formatOctal(b []byte, x int64) {
+	s := strconv.FormatInt(x, 8)
+	// Add leading zeros, but leave room for a NUL.
+	if n := len(b) - len(s) - 1; n > 0 {
+		s = strings.Repeat("0", n) + s
+	}
+	f.formatString(b, s)
+}
+
+// parsePAXTime takes a string of the form %d.%d as described in the PAX
+// specification. Note that this implementation allows for negative timestamps,
+// which is allowed for by the PAX specification, but not always portable.
+func parsePAXTime(s string) (time.Time, error) {
+	const maxNanoSecondDigits = 9
+
+	// Split string into seconds and sub-seconds parts.
+	ss, sn := s, ""
+	if pos := strings.IndexByte(s, '.'); pos >= 0 {
+		ss, sn = s[:pos], s[pos+1:]
+	}
+
+	// Parse the seconds.
+	secs, err := strconv.ParseInt(ss, 10, 64)
+	if err != nil {
+		return time.Time{}, ErrHeader
+	}
+	if len(sn) == 0 {
+		return time.Unix(secs, 0), nil // No sub-second values
+	}
+
+	// Parse the nanoseconds.
+	if strings.Trim(sn, "0123456789") != "" {
+		return time.Time{}, ErrHeader
+	}
+	if len(sn) < maxNanoSecondDigits {
+		sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad
+	} else {
+		sn = sn[:maxNanoSecondDigits] // Right truncate
+	}
+	nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed
+	if len(ss) > 0 && ss[0] == '-' {
+		return time.Unix(secs, -1*int64(nsecs)), nil // Negative correction
+	}
+	return time.Unix(secs, int64(nsecs)), nil
+}
+
+// TODO(dsnet): Implement formatPAXTime.
+
+// parsePAXRecord parses the input PAX record string into a key-value pair.
+// If parsing is successful, it will slice off the currently read record and
+// return the remainder as r.
+//
+// A PAX record is of the following form:
+//	"%d %s=%s\n" % (size, key, value)
+func parsePAXRecord(s string) (k, v, r string, err error) {
+	// The size field ends at the first space.
+	sp := strings.IndexByte(s, ' ')
+	if sp == -1 {
+		return "", "", s, ErrHeader
+	}
+
+	// Parse the first token as a decimal integer.
+	n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int
+	if perr != nil || n < 5 || int64(len(s)) < n {
+		return "", "", s, ErrHeader
+	}
+
+	// Extract everything between the space and the final newline.
+	rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:]
+	if nl != "\n" {
+		return "", "", s, ErrHeader
+	}
+
+	// The first equals separates the key from the value.
+	eq := strings.IndexByte(rec, '=')
+	if eq == -1 {
+		return "", "", s, ErrHeader
+	}
+	return rec[:eq], rec[eq+1:], rem, nil
+}
+
+// formatPAXRecord formats a single PAX record, prefixing it with the
+// appropriate length.
+func formatPAXRecord(k, v string) string {
+	const padding = 3 // Extra padding for ' ', '=', and '\n'
+	size := len(k) + len(v) + padding
+	size += len(strconv.Itoa(size))
+	record := fmt.Sprintf("%d %s=%s\n", size, k, v)
+
+	// Final adjustment if adding size field increased the record size.
+	if len(record) != size {
+		size = len(record)
+		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
+	}
+	return record
+}
diff --git a/vendor/github.com/dmcgowan/go-tar/writer.go b/vendor/github.com/dmcgowan/go-tar/writer.go
new file mode 100644
index 0000000..596fb8b
--- /dev/null
+++ b/vendor/github.com/dmcgowan/go-tar/writer.go
@@ -0,0 +1,364 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+// TODO(dsymonds):
+// - catch more errors (no first header, etc.)
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var (
+	ErrWriteTooLong    = errors.New("archive/tar: write too long")
+	ErrFieldTooLong    = errors.New("archive/tar: header field too long")
+	ErrWriteAfterClose = errors.New("archive/tar: write after close")
+	errInvalidHeader   = errors.New("archive/tar: header field too long or contains invalid values")
+)
+
+// A Writer provides sequential writing of a tar archive in POSIX.1 format.
+// A tar archive consists of a sequence of files.
+// Call WriteHeader to begin a new file, and then call Write to supply that file's data,
+// writing at most hdr.Size bytes in total.
+type Writer struct {
+	w          io.Writer
+	err        error
+	nb         int64 // number of unwritten bytes for current file entry
+	pad        int64 // amount of padding to write after current file entry
+	closed     bool
+	usedBinary bool  // whether the binary numeric field extension was used
+	preferPax  bool  // use PAX header instead of binary numeric header
+	hdrBuff    block // buffer to use in writeHeader when writing a regular header
+	paxHdrBuff block // buffer to use in writeHeader when writing a PAX header
+}
+
+// NewWriter creates a new Writer writing to w.
+func NewWriter(w io.Writer) *Writer { return &Writer{w: w} }
+
+// Flush finishes writing the current file (optional).
+func (tw *Writer) Flush() error {
+	if tw.nb > 0 {
+		tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
+		return tw.err
+	}
+
+	n := tw.nb + tw.pad
+	for n > 0 && tw.err == nil {
+		nr := n
+		if nr > blockSize {
+			nr = blockSize
+		}
+		var nw int
+		nw, tw.err = tw.w.Write(zeroBlock[0:nr])
+		n -= int64(nw)
+	}
+	tw.nb = 0
+	tw.pad = 0
+	return tw.err
+}
+
+var (
+	minTime = time.Unix(0, 0)
+	// There is room for 11 octal digits (33 bits) of mtime.
+	maxTime = minTime.Add((1<<33 - 1) * time.Second)
+)
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// WriteHeader calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+func (tw *Writer) WriteHeader(hdr *Header) error {
+	return tw.writeHeader(hdr, true)
+}
+
+// WriteHeader writes hdr and prepares to accept the file's contents.
+// WriteHeader calls Flush if it is not the first header.
+// Calling after a Close will return ErrWriteAfterClose.
+// As this method is called internally by writePax header to allow it to
+// suppress writing the pax header.
+func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error {
+	if tw.closed {
+		return ErrWriteAfterClose
+	}
+	if tw.err == nil {
+		tw.Flush()
+	}
+	if tw.err != nil {
+		return tw.err
+	}
+
+	// a map to hold pax header records, if any are needed
+	paxHeaders := make(map[string]string)
+
+	// TODO(dsnet): we might want to use PAX headers for
+	// subsecond time resolution, but for now let's just capture
+	// too long fields or non ascii characters
+
+	// We need to select which scratch buffer to use carefully,
+	// since this method is called recursively to write PAX headers.
+	// If allowPax is true, this is the non-recursive call, and we will use hdrBuff.
+	// If allowPax is false, we are being called by writePAXHeader, and hdrBuff is
+	// already being used by the non-recursive call, so we must use paxHdrBuff.
+	header := &tw.hdrBuff
+	if !allowPax {
+		header = &tw.paxHdrBuff
+	}
+	copy(header[:], zeroBlock[:])
+
+	// Wrappers around formatter that automatically sets paxHeaders if the
+	// argument extends beyond the capacity of the input byte slice.
+	var f formatter
+	var formatString = func(b []byte, s string, paxKeyword string) {
+		needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s)
+		if needsPaxHeader {
+			paxHeaders[paxKeyword] = s
+			return
+		}
+		f.formatString(b, s)
+	}
+	var formatNumeric = func(b []byte, x int64, paxKeyword string) {
+		// Try octal first.
+		s := strconv.FormatInt(x, 8)
+		if len(s) < len(b) {
+			f.formatOctal(b, x)
+			return
+		}
+
+		// If it is too long for octal, and PAX is preferred, use a PAX header.
+		if paxKeyword != paxNone && tw.preferPax {
+			f.formatOctal(b, 0)
+			s := strconv.FormatInt(x, 10)
+			paxHeaders[paxKeyword] = s
+			return
+		}
+
+		tw.usedBinary = true
+		f.formatNumeric(b, x)
+	}
+
+	// Handle out of range ModTime carefully.
+	var modTime int64
+	if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) {
+		modTime = hdr.ModTime.Unix()
+	}
+
+	v7 := header.V7()
+	formatString(v7.Name(), hdr.Name, paxPath)
+	// TODO(dsnet): The GNU format permits the mode field to be encoded in
+	// base-256 format. Thus, we can use formatNumeric instead of formatOctal.
+	f.formatOctal(v7.Mode(), hdr.Mode)
+	formatNumeric(v7.UID(), int64(hdr.Uid), paxUid)
+	formatNumeric(v7.GID(), int64(hdr.Gid), paxGid)
+	formatNumeric(v7.Size(), hdr.Size, paxSize)
+	// TODO(dsnet): Consider using PAX for finer time granularity.
+	formatNumeric(v7.ModTime(), modTime, paxNone)
+	v7.TypeFlag()[0] = hdr.Typeflag
+	formatString(v7.LinkName(), hdr.Linkname, paxLinkpath)
+
+	ustar := header.USTAR()
+	formatString(ustar.UserName(), hdr.Uname, paxUname)
+	formatString(ustar.GroupName(), hdr.Gname, paxGname)
+	formatNumeric(ustar.DevMajor(), hdr.Devmajor, paxNone)
+	formatNumeric(ustar.DevMinor(), hdr.Devminor, paxNone)
+
+	// TODO(dsnet): The logic surrounding the prefix field is broken when trying
+	// to encode the header as GNU format. The challenge with the current logic
+	// is that we are unsure what format we are using at any given moment until
+	// we have processed *all* of the fields. The problem is that by the time
+	// all fields have been processed, some work has already been done to handle
+	// each field under the assumption that it is for one given format or
+	// another. In some situations, this causes the Writer to be confused and
+	// encode a prefix field when the format being used is GNU. Thus, producing
+	// an invalid tar file.
+	//
+	// As a short-term fix, we disable the logic to use the prefix field, which
+	// will force the badly generated GNU files to become encoded as being
+	// the PAX format.
+	//
+	// As an alternative fix, we could hard-code preferPax to be true. However,
+	// this is problematic for the following reasons:
+	//	* The preferPax functionality is not tested at all.
+	//	* This can result in headers that try to use both the GNU and PAX
+	//	features at the same time, which is also wrong.
+	//
+	// The proper fix for this is to use a two-pass method:
+	//	* The first pass simply determines what set of formats can possibly
+	//	encode the given header.
+	//	* The second pass actually encodes the header as that given format
+	//	without worrying about violating the format.
+	//
+	// See the following:
+	//	https://golang.org/issue/12594
+	//	https://golang.org/issue/17630
+	//	https://golang.org/issue/9683
+	const usePrefix = false
+
+	// try to use a ustar header when only the name is too long
+	_, paxPathUsed := paxHeaders[paxPath]
+	if usePrefix && !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed {
+		prefix, suffix, ok := splitUSTARPath(hdr.Name)
+		if ok {
+			// Since we can encode in USTAR format, disable PAX header.
+			delete(paxHeaders, paxPath)
+
+			// Update the path fields
+			formatString(v7.Name(), suffix, paxNone)
+			formatString(ustar.Prefix(), prefix, paxNone)
+		}
+	}
+
+	if tw.usedBinary {
+		header.SetFormat(formatGNU)
+	} else {
+		header.SetFormat(formatUSTAR)
+	}
+
+	// Check if there were any formatting errors.
+	if f.err != nil {
+		tw.err = f.err
+		return tw.err
+	}
+
+	if allowPax {
+		for k, v := range hdr.Xattrs {
+			paxHeaders[paxXattr+k] = v
+		}
+	}
+
+	if len(paxHeaders) > 0 {
+		if !allowPax {
+			return errInvalidHeader
+		}
+		if err := tw.writePAXHeader(hdr, paxHeaders); err != nil {
+			return err
+		}
+	}
+	tw.nb = hdr.Size
+	tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize
+
+	_, tw.err = tw.w.Write(header[:])
+	return tw.err
+}
+
+// splitUSTARPath splits a path according to USTAR prefix and suffix rules.
+// If the path is not splittable, then it will return ("", "", false).
+func splitUSTARPath(name string) (prefix, suffix string, ok bool) {
+	length := len(name)
+	if length <= nameSize || !isASCII(name) {
+		return "", "", false
+	} else if length > prefixSize+1 {
+		length = prefixSize + 1
+	} else if name[length-1] == '/' {
+		length--
+	}
+
+	i := strings.LastIndex(name[:length], "/")
+	nlen := len(name) - i - 1 // nlen is length of suffix
+	plen := i                 // plen is length of prefix
+	if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize {
+		return "", "", false
+	}
+	return name[:i], name[i+1:], true
+}
+
+// writePaxHeader writes an extended pax header to the
+// archive.
+func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error {
+	// Prepare extended header
+	ext := new(Header)
+	ext.Typeflag = TypeXHeader
+	// Setting ModTime is required for reader parsing to
+	// succeed, and seems harmless enough.
+	ext.ModTime = hdr.ModTime
+	// The spec asks that we namespace our pseudo files
+	// with the current pid. However, this results in differing outputs
+	// for identical inputs. As such, the constant 0 is now used instead.
+	// golang.org/issue/12358
+	dir, file := path.Split(hdr.Name)
+	fullName := path.Join(dir, "PaxHeaders.0", file)
+
+	ascii := toASCII(fullName)
+	if len(ascii) > nameSize {
+		ascii = ascii[:nameSize]
+	}
+	ext.Name = ascii
+	// Construct the body
+	var buf bytes.Buffer
+
+	// Keys are sorted before writing to body to allow deterministic output.
+	keys := make([]string, 0, len(paxHeaders))
+	for k := range paxHeaders {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k]))
+	}
+
+	ext.Size = int64(len(buf.Bytes()))
+	if err := tw.writeHeader(ext, false); err != nil {
+		return err
+	}
+	if _, err := tw.Write(buf.Bytes()); err != nil {
+		return err
+	}
+	if err := tw.Flush(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Write writes to the current entry in the tar archive.
+// Write returns the error ErrWriteTooLong if more than
+// hdr.Size bytes are written after WriteHeader.
+func (tw *Writer) Write(b []byte) (n int, err error) {
+	if tw.closed {
+		err = ErrWriteAfterClose
+		return
+	}
+	overwrite := false
+	if int64(len(b)) > tw.nb {
+		b = b[0:tw.nb]
+		overwrite = true
+	}
+	n, err = tw.w.Write(b)
+	tw.nb -= int64(n)
+	if err == nil && overwrite {
+		err = ErrWriteTooLong
+		return
+	}
+	tw.err = err
+	return
+}
+
+// Close closes the tar archive, flushing any unwritten
+// data to the underlying writer.
+func (tw *Writer) Close() error {
+	if tw.err != nil || tw.closed {
+		return tw.err
+	}
+	tw.Flush()
+	tw.closed = true
+	if tw.err != nil {
+		return tw.err
+	}
+
+	// trailer: two zero blocks
+	for i := 0; i < 2; i++ {
+		_, tw.err = tw.w.Write(zeroBlock[:])
+		if tw.err != nil {
+			break
+		}
+	}
+	return tw.err
+}
diff --git a/vendor/github.com/docker/libnetwork/agent.go b/vendor/github.com/docker/libnetwork/agent.go
index 1328f0b..f120065 100644
--- a/vendor/github.com/docker/libnetwork/agent.go
+++ b/vendor/github.com/docker/libnetwork/agent.go
@@ -6,11 +6,9 @@
 	"encoding/json"
 	"fmt"
 	"net"
-	"os"
 	"sort"
 	"sync"
 
-	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/go-events"
 	"github.com/docker/libnetwork/cluster"
 	"github.com/docker/libnetwork/datastore"
@@ -282,12 +280,8 @@
 	}
 
 	keys, _ := c.getKeys(subsysGossip)
-	hostname, _ := os.Hostname()
-	nodeName := hostname + "-" + stringid.TruncateID(stringid.GenerateRandomID())
-	logrus.Info("Gossip cluster hostname ", nodeName)
 
 	netDBConf := networkdb.DefaultConfig()
-	netDBConf.NodeName = nodeName
 	netDBConf.BindAddr = listenAddr
 	netDBConf.AdvertiseAddr = advertiseAddr
 	netDBConf.Keys = keys
@@ -892,13 +886,13 @@
 		if svcID != "" {
 			// This is a remote task part of a service
 			if err := c.addServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent"); err != nil {
-				logrus.Errorf("failed adding service binding for %s epRec:%v err:%s", eid, epRec, err)
+				logrus.Errorf("failed adding service binding for %s epRec:%v err:%v", eid, epRec, err)
 				return
 			}
 		} else {
 			// This is a remote container simply attached to an attachable network
 			if err := c.addContainerNameResolution(nid, eid, containerName, taskAliases, ip, "handleEpTableEvent"); err != nil {
-				logrus.Errorf("failed adding service binding for %s epRec:%v err:%s", eid, epRec, err)
+				logrus.Errorf("failed adding container name resolution for %s epRec:%v err:%v", eid, epRec, err)
 			}
 		}
 	} else {
@@ -906,13 +900,13 @@
 		if svcID != "" {
 			// This is a remote task part of a service
 			if err := c.rmServiceBinding(svcName, svcID, nid, eid, containerName, vip, ingressPorts, serviceAliases, taskAliases, ip, "handleEpTableEvent", true); err != nil {
-				logrus.Errorf("failed removing service binding for %s epRec:%v err:%s", eid, epRec, err)
+				logrus.Errorf("failed removing service binding for %s epRec:%v err:%v", eid, epRec, err)
 				return
 			}
 		} else {
 			// This is a remote container simply attached to an attachable network
 			if err := c.delContainerNameResolution(nid, eid, containerName, taskAliases, ip, "handleEpTableEvent"); err != nil {
-				logrus.Errorf("failed adding service binding for %s epRec:%v err:%s", eid, epRec, err)
+				logrus.Errorf("failed removing container name resolution for %s epRec:%v err:%v", eid, epRec, err)
 			}
 		}
 	}
diff --git a/vendor/github.com/docker/libnetwork/bitseq/sequence.go b/vendor/github.com/docker/libnetwork/bitseq/sequence.go
index 3946473..a1a9810 100644
--- a/vendor/github.com/docker/libnetwork/bitseq/sequence.go
+++ b/vendor/github.com/docker/libnetwork/bitseq/sequence.go
@@ -41,6 +41,7 @@
 	id         string
 	dbIndex    uint64
 	dbExists   bool
+	curr       uint64
 	store      datastore.DataStore
 	sync.Mutex
 }
@@ -193,26 +194,27 @@
 		dbIndex:    h.dbIndex,
 		dbExists:   h.dbExists,
 		store:      h.store,
+		curr:       h.curr,
 	}
 }
 
 // SetAnyInRange atomically sets the first unset bit in the specified range in the sequence and returns the corresponding ordinal
-func (h *Handle) SetAnyInRange(start, end uint64) (uint64, error) {
+func (h *Handle) SetAnyInRange(start, end uint64, serial bool) (uint64, error) {
 	if end < start || end >= h.bits {
 		return invalidPos, fmt.Errorf("invalid bit range [%d, %d]", start, end)
 	}
 	if h.Unselected() == 0 {
 		return invalidPos, ErrNoBitAvailable
 	}
-	return h.set(0, start, end, true, false)
+	return h.set(0, start, end, true, false, serial)
 }
 
 // SetAny atomically sets the first unset bit in the sequence and returns the corresponding ordinal
-func (h *Handle) SetAny() (uint64, error) {
+func (h *Handle) SetAny(serial bool) (uint64, error) {
 	if h.Unselected() == 0 {
 		return invalidPos, ErrNoBitAvailable
 	}
-	return h.set(0, 0, h.bits-1, true, false)
+	return h.set(0, 0, h.bits-1, true, false, serial)
 }
 
 // Set atomically sets the corresponding bit in the sequence
@@ -220,7 +222,7 @@
 	if err := h.validateOrdinal(ordinal); err != nil {
 		return err
 	}
-	_, err := h.set(ordinal, 0, 0, false, false)
+	_, err := h.set(ordinal, 0, 0, false, false, false)
 	return err
 }
 
@@ -229,7 +231,7 @@
 	if err := h.validateOrdinal(ordinal); err != nil {
 		return err
 	}
-	_, err := h.set(ordinal, 0, 0, false, true)
+	_, err := h.set(ordinal, 0, 0, false, true, false)
 	return err
 }
 
@@ -298,7 +300,7 @@
 }
 
 // set/reset the bit
-func (h *Handle) set(ordinal, start, end uint64, any bool, release bool) (uint64, error) {
+func (h *Handle) set(ordinal, start, end uint64, any bool, release bool, serial bool) (uint64, error) {
 	var (
 		bitPos  uint64
 		bytePos uint64
@@ -308,6 +310,7 @@
 
 	for {
 		var store datastore.DataStore
+		curr := uint64(0)
 		h.Lock()
 		store = h.store
 		h.Unlock()
@@ -318,15 +321,18 @@
 		}
 
 		h.Lock()
+		if serial {
+			curr = h.curr
+		}
 		// Get position if available
 		if release {
 			bytePos, bitPos = ordinalToPos(ordinal)
 		} else {
 			if any {
-				bytePos, bitPos, err = getFirstAvailable(h.head, start)
+				bytePos, bitPos, err = getAvailableFromCurrent(h.head, start, curr, end)
 				ret = posToOrdinal(bytePos, bitPos)
-				if end < ret {
-					err = ErrNoBitAvailable
+				if err == nil {
+					h.curr = ret + 1
 				}
 			} else {
 				bytePos, bitPos, err = checkIfAvailable(h.head, ordinal)
@@ -515,6 +521,29 @@
 	return invalidPos, invalidPos, ErrNoBitAvailable
 }
 
+// getAvailableFromCurrent will look for available ordinal from the current ordinal.
+// If none found then it will loop back to the start to check of the available bit.
+// This can be further optimized to check from start till curr in case of a rollover
+func getAvailableFromCurrent(head *sequence, start, curr, end uint64) (uint64, uint64, error) {
+	var bytePos, bitPos uint64
+	if curr != 0 && curr > start {
+		bytePos, bitPos, _ = getFirstAvailable(head, curr)
+		ret := posToOrdinal(bytePos, bitPos)
+		if end < ret {
+			goto begin
+		}
+		return bytePos, bitPos, nil
+	}
+
+begin:
+	bytePos, bitPos, _ = getFirstAvailable(head, start)
+	ret := posToOrdinal(bytePos, bitPos)
+	if end < ret {
+		return invalidPos, invalidPos, ErrNoBitAvailable
+	}
+	return bytePos, bitPos, nil
+}
+
 // checkIfAvailable checks if the bit correspondent to the specified ordinal is unset
 // If the ordinal is beyond the sequence limits, a negative response is returned
 func checkIfAvailable(head *sequence, ordinal uint64) (uint64, uint64, error) {
diff --git a/vendor/github.com/docker/libnetwork/bitseq/store.go b/vendor/github.com/docker/libnetwork/bitseq/store.go
index 5448927..cdb7f04 100644
--- a/vendor/github.com/docker/libnetwork/bitseq/store.go
+++ b/vendor/github.com/docker/libnetwork/bitseq/store.go
@@ -87,6 +87,7 @@
 	dstH.dbIndex = h.dbIndex
 	dstH.dbExists = h.dbExists
 	dstH.store = h.store
+	dstH.curr = h.curr
 	dstH.Unlock()
 
 	return nil
diff --git a/vendor/github.com/docker/libnetwork/config/config.go b/vendor/github.com/docker/libnetwork/config/config.go
index 96a157a..3e8473d 100644
--- a/vendor/github.com/docker/libnetwork/config/config.go
+++ b/vendor/github.com/docker/libnetwork/config/config.go
@@ -15,6 +15,11 @@
 	"github.com/sirupsen/logrus"
 )
 
+const (
+	warningThNetworkControlPlaneMTU = 1500
+	minimumNetworkControlPlaneMTU   = 500
+)
+
 // Config encapsulates configurations of various Libnetwork components
 type Config struct {
 	Daemon          DaemonCfg
@@ -226,9 +231,12 @@
 func OptionNetworkControlPlaneMTU(exp int) Option {
 	return func(c *Config) {
 		logrus.Debugf("Network Control Plane MTU: %d", exp)
-		if exp < 1500 {
-			// if exp == 0 the value won't be used
-			logrus.Warnf("Received a MTU of %d, this value is very low, the network control plane can misbehave", exp)
+		if exp < warningThNetworkControlPlaneMTU {
+			logrus.Warnf("Received a MTU of %d, this value is very low, the network control plane can misbehave,"+
+				" defaulting to minimum value (%d)", exp, minimumNetworkControlPlaneMTU)
+			if exp < minimumNetworkControlPlaneMTU {
+				exp = minimumNetworkControlPlaneMTU
+			}
 		}
 		c.Daemon.NetworkControlPlaneMTU = exp
 	}
diff --git a/vendor/github.com/docker/libnetwork/controller.go b/vendor/github.com/docker/libnetwork/controller.go
index 801097a..236095c 100644
--- a/vendor/github.com/docker/libnetwork/controller.go
+++ b/vendor/github.com/docker/libnetwork/controller.go
@@ -341,6 +341,7 @@
 			// should still be present when cleaning up
 			// service bindings
 			c.agentClose()
+			c.cleanupServiceDiscovery("")
 			c.cleanupServiceBindings("")
 
 			c.agentStopComplete()
diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go b/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
index 64a2743..1742c8d 100644
--- a/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
+++ b/vendor/github.com/docker/libnetwork/drivers/bridge/bridge.go
@@ -763,11 +763,7 @@
 
 	// Apply the prepared list of steps, and abort at the first error.
 	bridgeSetup.queueStep(setupDeviceUp)
-	if err = bridgeSetup.apply(); err != nil {
-		return err
-	}
-
-	return nil
+	return bridgeSetup.apply()
 }
 
 func (d *driver) DeleteNetwork(nid string) error {
diff --git a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
index f01b08d..da65444 100644
--- a/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
+++ b/vendor/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go
@@ -169,11 +169,7 @@
 	}
 
 	// Set Accept on all non-intercontainer outgoing packets.
-	if err := programChainRule(outRule, "ACCEPT NON_ICC OUTGOING", enable); err != nil {
-		return err
-	}
-
-	return nil
+	return programChainRule(outRule, "ACCEPT NON_ICC OUTGOING", enable)
 }
 
 func programChainRule(rule iptRule, ruleDescr string, insert bool) error {
@@ -304,10 +300,7 @@
 		return err
 	}
 	// Set Inter Container Communication.
-	if err := setIcc(bridgeIface, icc, insert); err != nil {
-		return err
-	}
-	return nil
+	return setIcc(bridgeIface, icc, insert)
 }
 
 func clearEndpointConnections(nlh *netlink.Handle, ep *bridgeEndpoint) {
diff --git a/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go
index 0c39ec9..28d6cca 100644
--- a/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go
+++ b/vendor/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go
@@ -201,5 +201,5 @@
 
 // getDummyName returns the name of a dummy parent with truncated net ID and driver prefix
 func getDummyName(netID string) string {
-	return fmt.Sprintf("%s%s", dummyPrefix, netID)
+	return dummyPrefix + netID
 }
diff --git a/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go
index 843a2e7..98d4bd3 100644
--- a/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go
+++ b/vendor/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go
@@ -205,5 +205,5 @@
 
 // getDummyName returns the name of a dummy parent with truncated net ID and driver prefix
 func getDummyName(netID string) string {
-	return fmt.Sprintf("%s%s", dummyPrefix, netID)
+	return dummyPrefix + netID
 }
diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/encryption.go b/vendor/github.com/docker/libnetwork/drivers/overlay/encryption.go
index f12d7a8..802d7bc 100644
--- a/vendor/github.com/docker/libnetwork/drivers/overlay/encryption.go
+++ b/vendor/github.com/docker/libnetwork/drivers/overlay/encryption.go
@@ -21,7 +21,6 @@
 
 const (
 	r            = 0xD0C4E3
-	timeout      = 30
 	pktExpansion = 26 // SPI(4) + SeqN(4) + IV(8) + PadLength(1) + NextHeader(1) + ICV(8)
 )
 
diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/joinleave.go b/vendor/github.com/docker/libnetwork/drivers/overlay/joinleave.go
index a07838b..0770513 100644
--- a/vendor/github.com/docker/libnetwork/drivers/overlay/joinleave.go
+++ b/vendor/github.com/docker/libnetwork/drivers/overlay/joinleave.go
@@ -68,7 +68,7 @@
 
 	ep.ifName = containerIfName
 
-	if err := d.writeEndpointToStore(ep); err != nil {
+	if err = d.writeEndpointToStore(ep); err != nil {
 		return fmt.Errorf("failed to update overlay endpoint %s to local data store: %v", ep.id[0:7], err)
 	}
 
@@ -86,7 +86,7 @@
 		return err
 	}
 
-	if err := sbox.AddInterface(overlayIfName, "veth",
+	if err = sbox.AddInterface(overlayIfName, "veth",
 		sbox.InterfaceOptions().Master(s.brName)); err != nil {
 		return fmt.Errorf("could not add veth pair inside the network sandbox: %v", err)
 	}
@@ -100,7 +100,7 @@
 		return err
 	}
 
-	if err := nlh.LinkSetHardwareAddr(veth, ep.mac); err != nil {
+	if err = nlh.LinkSetHardwareAddr(veth, ep.mac); err != nil {
 		return fmt.Errorf("could not set mac address (%v) to the container interface: %v", ep.mac, err)
 	}
 
@@ -108,7 +108,7 @@
 		if sub == s {
 			continue
 		}
-		if err := jinfo.AddStaticRoute(sub.subnetIP, types.NEXTHOP, s.gwIP.IP); err != nil {
+		if err = jinfo.AddStaticRoute(sub.subnetIP, types.NEXTHOP, s.gwIP.IP); err != nil {
 			logrus.Errorf("Adding subnet %s static route in network %q failed\n", s.subnetIP, n.id)
 		}
 	}
@@ -122,7 +122,7 @@
 
 	d.peerAdd(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), false, false, true)
 
-	if err := d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil {
+	if err = d.checkEncryption(nid, nil, n.vxlanID(s), true, true); err != nil {
 		logrus.Warn(err)
 	}
 
@@ -200,7 +200,7 @@
 	}
 
 	if etype == driverapi.Delete {
-		d.peerDelete(nid, eid, addr.IP, addr.Mask, mac, vtep)
+		d.peerDelete(nid, eid, addr.IP, addr.Mask, mac, vtep, false)
 		return
 	}
 
@@ -232,11 +232,9 @@
 		}
 	}
 
-	n.leaveSandbox()
+	d.peerDelete(nid, eid, ep.addr.IP, ep.addr.Mask, ep.mac, net.ParseIP(d.advertiseAddress), true)
 
-	if err := d.checkEncryption(nid, nil, 0, true, false); err != nil {
-		logrus.Warn(err)
-	}
+	n.leaveSandbox()
 
 	return nil
 }
diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go
index d0e63f6..bb08de4 100644
--- a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go
+++ b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go
@@ -144,11 +144,7 @@
 		return fmt.Errorf("overlay local store not initialized, ep not deleted")
 	}
 
-	if err := d.localStore.DeleteObjectAtomic(e); err != nil {
-		return err
-	}
-
-	return nil
+	return d.localStore.DeleteObjectAtomic(e)
 }
 
 func (d *driver) writeEndpointToStore(e *endpoint) error {
@@ -156,10 +152,7 @@
 		return fmt.Errorf("overlay local store not initialized, ep not added")
 	}
 
-	if err := d.localStore.PutObjectAtomic(e); err != nil {
-		return err
-	}
-	return nil
+	return d.localStore.PutObjectAtomic(e)
 }
 
 func (ep *endpoint) DataScope() string {
diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go
index 126093f..3fbfccf 100644
--- a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go
+++ b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_network.go
@@ -119,7 +119,7 @@
 	data := []byte{'0', '\n'}
 
 	if err = ioutil.WriteFile(path, data, 0644); err != nil {
-		logrus.Errorf("endbling default vlan on bridge %s failed %v", brName, err)
+		logrus.Errorf("enabling default vlan on bridge %s failed %v", brName, err)
 		os.Exit(1)
 	}
 	os.Exit(0)
@@ -251,8 +251,9 @@
 		if err := d.deleteEndpointFromStore(ep); err != nil {
 			logrus.Warnf("Failed to delete overlay endpoint %s from local store: %v", ep.id[0:7], err)
 		}
-
 	}
+	// flush the peerDB entries
+	d.peerFlush(nid)
 	d.deleteNetwork(nid)
 
 	vnis, err := n.releaseVxlanID()
@@ -494,7 +495,7 @@
 	brIfaceOption := make([]osl.IfaceOption, 2)
 	brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Address(s.gwIP))
 	brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Bridge(true))
-	Ifaces[fmt.Sprintf("%s+%s", brName, "br")] = brIfaceOption
+	Ifaces[brName+"+br"] = brIfaceOption
 
 	err := sbox.Restore(Ifaces, nil, nil, nil)
 	if err != nil {
@@ -504,12 +505,8 @@
 	Ifaces = make(map[string][]osl.IfaceOption)
 	vxlanIfaceOption := make([]osl.IfaceOption, 1)
 	vxlanIfaceOption = append(vxlanIfaceOption, sbox.InterfaceOptions().Master(brName))
-	Ifaces[fmt.Sprintf("%s+%s", vxlanName, "vxlan")] = vxlanIfaceOption
-	err = sbox.Restore(Ifaces, nil, nil, nil)
-	if err != nil {
-		return err
-	}
-	return nil
+	Ifaces[vxlanName+"+vxlan"] = vxlanIfaceOption
+	return sbox.Restore(Ifaces, nil, nil, nil)
 }
 
 func (n *network) setupSubnetSandbox(s *subnet, brName, vxlanName string) error {
@@ -760,58 +757,38 @@
 				continue
 			}
 
-			logrus.Debugf("miss notification: dest IP %v, dest MAC %v", ip, mac)
-
 			if neigh.State&(netlink.NUD_STALE|netlink.NUD_INCOMPLETE) == 0 {
 				continue
 			}
 
 			if n.driver.isSerfAlive() {
+				logrus.Debugf("miss notification: dest IP %v, dest MAC %v", ip, mac)
 				mac, IPmask, vtep, err := n.driver.resolvePeer(n.id, ip)
 				if err != nil {
 					logrus.Errorf("could not resolve peer %q: %v", ip, err)
 					continue
 				}
 				n.driver.peerAdd(n.id, "dummy", ip, IPmask, mac, vtep, l2Miss, l3Miss, false)
-			} else {
-				// If the gc_thresh values are lower kernel might knock off the neighor entries.
-				// When we get a L3 miss check if its a valid peer and reprogram the neighbor
-				// entry again. Rate limit it to once attempt every 500ms, just in case a faulty
-				// container sends a flood of packets to invalid peers
-				if !l3Miss {
-					continue
-				}
-				if time.Since(t) > 500*time.Millisecond {
+			} else if l3Miss && time.Since(t) > time.Second {
+				// All the local peers will trigger a miss notification but this one is expected and the local container will reply
+				// autonomously to the ARP request
+				// In case the gc_thresh3 value is low the kernel might reject new entries during peerAdd. This will trigger the following
+				// extra logs that will inform of the possible issue.
+				// Entries created would not be deleted; see documentation http://man7.org/linux/man-pages/man7/arp.7.html:
+				// Entries which are marked as permanent are never deleted by the garbage-collector.
+				// The time limit here is to guarantee that the dbSearch is not
+				// done too frequently causing a stall of the peerDB operations.
+				pKey, pEntry, err := n.driver.peerDbSearch(n.id, ip)
+				if err == nil && !pEntry.isLocal {
 					t = time.Now()
-					n.programNeighbor(ip)
+					logrus.Warnf("miss notification for peer:%+v l3Miss:%t l2Miss:%t, if the problem persist check the gc_thresh on the host pKey:%+v pEntry:%+v err:%v",
+						neigh, l3Miss, l2Miss, *pKey, *pEntry, err)
 				}
 			}
 		}
 	}
 }
 
-func (n *network) programNeighbor(ip net.IP) {
-	peerMac, _, _, err := n.driver.peerDbSearch(n.id, ip)
-	if err != nil {
-		logrus.Errorf("Reprogramming on L3 miss failed for %s, no peer entry", ip)
-		return
-	}
-	s := n.getSubnetforIPAddr(ip)
-	if s == nil {
-		logrus.Errorf("Reprogramming on L3 miss failed for %s, not a valid subnet", ip)
-		return
-	}
-	sbox := n.sandbox()
-	if sbox == nil {
-		logrus.Errorf("Reprogramming on L3 miss failed for %s, overlay sandbox missing", ip)
-		return
-	}
-	if err := sbox.AddNeighbor(ip, peerMac, true, sbox.NeighborOptions().LinkName(s.vxlanName)); err != nil {
-		logrus.Errorf("Reprogramming on L3 miss failed for %s: %v", ip, err)
-		return
-	}
-}
-
 func (d *driver) addNetwork(n *network) {
 	d.Lock()
 	d.networks[n.id] = n
@@ -1058,7 +1035,7 @@
 		}
 
 		if s.vni == 0 {
-			vxlanID, err := n.driver.vxlanIdm.GetID()
+			vxlanID, err := n.driver.vxlanIdm.GetID(true)
 			if err != nil {
 				return fmt.Errorf("failed to allocate vxlan id: %v", err)
 			}
@@ -1090,15 +1067,6 @@
 	return false
 }
 
-func (n *network) getSubnetforIPAddr(ip net.IP) *subnet {
-	for _, s := range n.subnets {
-		if s.subnetIP.Contains(ip) {
-			return s
-		}
-	}
-	return nil
-}
-
 // getSubnetforIP returns the subnet to which the given IP belongs
 func (n *network) getSubnetforIP(ip *net.IPNet) *subnet {
 	for _, s := range n.subnets {
diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_serf.go b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_serf.go
index 6e034ad..f644799 100644
--- a/vendor/github.com/docker/libnetwork/drivers/overlay/ov_serf.go
+++ b/vendor/github.com/docker/libnetwork/drivers/overlay/ov_serf.go
@@ -122,7 +122,7 @@
 	case "join":
 		d.peerAdd(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac, net.ParseIP(vtepStr), false, false, false)
 	case "leave":
-		d.peerDelete(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac, net.ParseIP(vtepStr))
+		d.peerDelete(nid, eid, net.ParseIP(ipStr), net.IPMask(net.ParseIP(maskStr).To4()), mac, net.ParseIP(vtepStr), false)
 	}
 }
 
@@ -135,13 +135,13 @@
 		fmt.Printf("Failed to scan query payload string: %v\n", err)
 	}
 
-	peerMac, peerIPMask, vtep, err := d.peerDbSearch(nid, net.ParseIP(ipStr))
+	pKey, pEntry, err := d.peerDbSearch(nid, net.ParseIP(ipStr))
 	if err != nil {
 		return
 	}
 
-	logrus.Debugf("Sending peer query resp mac %s, mask %s, vtep %s", peerMac, net.IP(peerIPMask), vtep)
-	q.Respond([]byte(fmt.Sprintf("%s %s %s", peerMac.String(), net.IP(peerIPMask).String(), vtep.String())))
+	logrus.Debugf("Sending peer query resp mac %v, mask %s, vtep %s", pKey.peerMac, net.IP(pEntry.peerIPMask).String(), pEntry.vtep)
+	q.Respond([]byte(fmt.Sprintf("%s %s %s", pKey.peerMac.String(), net.IP(pEntry.peerIPMask).String(), pEntry.vtep.String())))
 }
 
 func (d *driver) resolvePeer(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) {
diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go b/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go
index e624aad..f029c5c 100644
--- a/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go
+++ b/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.go
@@ -162,7 +162,7 @@
 		Ifaces := make(map[string][]osl.IfaceOption)
 		vethIfaceOption := make([]osl.IfaceOption, 1)
 		vethIfaceOption = append(vethIfaceOption, n.sbox.InterfaceOptions().Master(s.brName))
-		Ifaces[fmt.Sprintf("%s+%s", "veth", "veth")] = vethIfaceOption
+		Ifaces["veth+veth"] = vethIfaceOption
 
 		err := n.sbox.Restore(Ifaces, nil, nil, nil)
 		if err != nil {
@@ -262,7 +262,7 @@
 		d.Unlock()
 
 		// If containers are already running on this network update the
-		// advertiseaddress in the peerDB
+		// advertise address in the peerDB
 		d.localJoinOnce.Do(func() {
 			d.peerDBUpdateSelf()
 		})
@@ -270,7 +270,7 @@
 		// If there is no cluster store there is no need to start serf.
 		if d.store != nil {
 			if err := validateSelf(advertiseAddress); err != nil {
-				logrus.Warnf("%s", err.Error())
+				logrus.Warn(err.Error())
 			}
 			err := d.serfInit()
 			if err != nil {
diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.proto b/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.proto
index 45b8c9d..3133386 100644
--- a/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.proto
+++ b/vendor/github.com/docker/libnetwork/drivers/overlay/overlay.proto
@@ -24,4 +24,4 @@
 	// which this container is running and can be reached by
 	// building a tunnel to that host IP.
 	string tunnel_endpoint_ip = 3 [(gogoproto.customname) = "TunnelEndpointIP"];
-}
\ No newline at end of file
+}
diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/ovmanager/ovmanager.go b/vendor/github.com/docker/libnetwork/drivers/overlay/ovmanager/ovmanager.go
index a80f335..58cc687 100644
--- a/vendor/github.com/docker/libnetwork/drivers/overlay/ovmanager/ovmanager.go
+++ b/vendor/github.com/docker/libnetwork/drivers/overlay/ovmanager/ovmanager.go
@@ -165,7 +165,7 @@
 	n.Unlock()
 
 	if vni == 0 {
-		vni, err = n.driver.vxlanIdm.GetIDInRange(vxlanIDStart, vxlanIDEnd)
+		vni, err = n.driver.vxlanIdm.GetIDInRange(vxlanIDStart, vxlanIDEnd, true)
 		if err != nil {
 			return err
 		}
diff --git a/vendor/github.com/docker/libnetwork/drivers/overlay/peerdb.go b/vendor/github.com/docker/libnetwork/drivers/overlay/peerdb.go
index f953e3c..bdd3cb1 100644
--- a/vendor/github.com/docker/libnetwork/drivers/overlay/peerdb.go
+++ b/vendor/github.com/docker/libnetwork/drivers/overlay/peerdb.go
@@ -8,6 +8,7 @@
 	"syscall"
 
 	"github.com/docker/libnetwork/common"
+	"github.com/docker/libnetwork/osl"
 	"github.com/sirupsen/logrus"
 )
 
@@ -22,16 +23,48 @@
 	eid        string
 	vtep       net.IP
 	peerIPMask net.IPMask
-	inSandbox  bool
 	isLocal    bool
 }
 
+func (p *peerEntry) MarshalDB() peerEntryDB {
+	ones, bits := p.peerIPMask.Size()
+	return peerEntryDB{
+		eid:            p.eid,
+		vtep:           p.vtep.String(),
+		peerIPMaskOnes: ones,
+		peerIPMaskBits: bits,
+		isLocal:        p.isLocal,
+	}
+}
+
+// This is the structure saved into the set (SetMatrix); due to the implementation of it
+// the value inserted in the set has to be Hashable so the []byte had to be converted into
+// strings
+type peerEntryDB struct {
+	eid            string
+	vtep           string
+	peerIPMaskOnes int
+	peerIPMaskBits int
+	isLocal        bool
+}
+
+func (p *peerEntryDB) UnMarshalDB() peerEntry {
+	return peerEntry{
+		eid:        p.eid,
+		vtep:       net.ParseIP(p.vtep),
+		peerIPMask: net.CIDRMask(p.peerIPMaskOnes, p.peerIPMaskBits),
+		isLocal:    p.isLocal,
+	}
+}
+
 type peerMap struct {
-	mp map[string]peerEntry
+	// set of peerEntry, note they have to be objects and not pointers to maintain the proper equality checks
+	mp common.SetMatrix
 	sync.Mutex
 }
 
 type peerNetworkMap struct {
+	// map with key peerKey
 	mp map[string]*peerMap
 	sync.Mutex
 }
@@ -54,11 +87,7 @@
 	}
 
 	pKey.peerMac, err = net.ParseMAC(string(macB))
-	if err != nil {
-		return err
-	}
-
-	return nil
+	return err
 }
 
 func (d *driver) peerDbWalk(f func(string, *peerKey, *peerEntry) bool) error {
@@ -87,10 +116,13 @@
 	}
 
 	mp := map[string]peerEntry{}
-
 	pMap.Lock()
-	for pKeyStr, pEntry := range pMap.mp {
-		mp[pKeyStr] = pEntry
+	for _, pKeyStr := range pMap.mp.Keys() {
+		entryDBList, ok := pMap.mp.Get(pKeyStr)
+		if ok {
+			peerEntryDB := entryDBList[0].(peerEntryDB)
+			mp[pKeyStr] = peerEntryDB.UnMarshalDB()
+		}
 	}
 	pMap.Unlock()
 
@@ -107,45 +139,38 @@
 	return nil
 }
 
-func (d *driver) peerDbSearch(nid string, peerIP net.IP) (net.HardwareAddr, net.IPMask, net.IP, error) {
-	var (
-		peerMac    net.HardwareAddr
-		vtep       net.IP
-		peerIPMask net.IPMask
-		found      bool
-	)
-
+func (d *driver) peerDbSearch(nid string, peerIP net.IP) (*peerKey, *peerEntry, error) {
+	var pKeyMatched *peerKey
+	var pEntryMatched *peerEntry
 	err := d.peerDbNetworkWalk(nid, func(pKey *peerKey, pEntry *peerEntry) bool {
 		if pKey.peerIP.Equal(peerIP) {
-			peerMac = pKey.peerMac
-			peerIPMask = pEntry.peerIPMask
-			vtep = pEntry.vtep
-			found = true
-			return found
+			pKeyMatched = pKey
+			pEntryMatched = pEntry
+			return true
 		}
 
-		return found
+		return false
 	})
 
 	if err != nil {
-		return nil, nil, nil, fmt.Errorf("peerdb search for peer ip %q failed: %v", peerIP, err)
+		return nil, nil, fmt.Errorf("peerdb search for peer ip %q failed: %v", peerIP, err)
 	}
 
-	if !found {
-		return nil, nil, nil, fmt.Errorf("peer ip %q not found in peerdb", peerIP)
+	if pKeyMatched == nil || pEntryMatched == nil {
+		return nil, nil, fmt.Errorf("peer ip %q not found in peerdb", peerIP)
 	}
 
-	return peerMac, peerIPMask, vtep, nil
+	return pKeyMatched, pEntryMatched, nil
 }
 
 func (d *driver) peerDbAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
-	peerMac net.HardwareAddr, vtep net.IP, isLocal bool) {
+	peerMac net.HardwareAddr, vtep net.IP, isLocal bool) (bool, int) {
 
 	d.peerDb.Lock()
 	pMap, ok := d.peerDb.mp[nid]
 	if !ok {
 		d.peerDb.mp[nid] = &peerMap{
-			mp: make(map[string]peerEntry),
+			mp: common.NewSetMatrix(),
 		}
 
 		pMap = d.peerDb.mp[nid]
@@ -165,18 +190,24 @@
 	}
 
 	pMap.Lock()
-	pMap.mp[pKey.String()] = pEntry
-	pMap.Unlock()
+	defer pMap.Unlock()
+	b, i := pMap.mp.Insert(pKey.String(), pEntry.MarshalDB())
+	if i != 1 {
+		// Transient case, there is more than one endpoint that is using the same IP,MAC pair
+		s, _ := pMap.mp.String(pKey.String())
+		logrus.Warnf("peerDbAdd transient condition - Key:%s cardinality:%d db state:%s", pKey.String(), i, s)
+	}
+	return b, i
 }
 
 func (d *driver) peerDbDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
-	peerMac net.HardwareAddr, vtep net.IP) peerEntry {
+	peerMac net.HardwareAddr, vtep net.IP, isLocal bool) (bool, int) {
 
 	d.peerDb.Lock()
 	pMap, ok := d.peerDb.mp[nid]
 	if !ok {
 		d.peerDb.Unlock()
-		return peerEntry{}
+		return false, 0
 	}
 	d.peerDb.Unlock()
 
@@ -185,22 +216,22 @@
 		peerMac: peerMac,
 	}
 
-	pMap.Lock()
-
-	pEntry, ok := pMap.mp[pKey.String()]
-	if ok {
-		// Mismatched endpoint ID(possibly outdated). Do not
-		// delete peerdb
-		if pEntry.eid != eid {
-			pMap.Unlock()
-			return pEntry
-		}
+	pEntry := peerEntry{
+		eid:        eid,
+		vtep:       vtep,
+		peerIPMask: peerIPMask,
+		isLocal:    isLocal,
 	}
 
-	delete(pMap.mp, pKey.String())
-	pMap.Unlock()
-
-	return pEntry
+	pMap.Lock()
+	defer pMap.Unlock()
+	b, i := pMap.mp.Remove(pKey.String(), pEntry.MarshalDB())
+	if i != 0 {
+		// Transient case, there is more than one endpoint that is using the same IP,MAC pair
+		s, _ := pMap.mp.String(pKey.String())
+		logrus.Warnf("peerDbDelete transient condition - Key:%s cardinality:%d db state:%s", pKey.String(), i, s)
+	}
+	return b, i
 }
 
 // The overlay uses a lazy initialization approach, this means that when a network is created
@@ -224,6 +255,7 @@
 	peerOperationINIT peerOperationType = iota
 	peerOperationADD
 	peerOperationDELETE
+	peerOperationFLUSH
 )
 
 type peerOperation struct {
@@ -253,7 +285,9 @@
 			case peerOperationADD:
 				err = d.peerAddOp(op.networkID, op.endpointID, op.peerIP, op.peerIPMask, op.peerMac, op.vtepIP, op.l2Miss, op.l3Miss, true, op.localPeer)
 			case peerOperationDELETE:
-				err = d.peerDeleteOp(op.networkID, op.endpointID, op.peerIP, op.peerIPMask, op.peerMac, op.vtepIP)
+				err = d.peerDeleteOp(op.networkID, op.endpointID, op.peerIP, op.peerIPMask, op.peerMac, op.vtepIP, op.localPeer)
+			case peerOperationFLUSH:
+				err = d.peerFlushOp(op.networkID)
 			}
 			if err != nil {
 				logrus.Warnf("Peer operation failed:%s op:%v", err, op)
@@ -286,7 +320,6 @@
 
 func (d *driver) peerAdd(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
 	peerMac net.HardwareAddr, vtep net.IP, l2Miss, l3Miss, localPeer bool) {
-	callerName := common.CallerName(1)
 	d.peerOpCh <- &peerOperation{
 		opType:     peerOperationADD,
 		networkID:  nid,
@@ -298,24 +331,32 @@
 		l2Miss:     l2Miss,
 		l3Miss:     l3Miss,
 		localPeer:  localPeer,
-		callerName: callerName,
+		callerName: common.CallerName(1),
 	}
 }
 
 func (d *driver) peerAddOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
-	peerMac net.HardwareAddr, vtep net.IP, l2Miss, l3Miss, updateDB, updateOnlyDB bool) error {
+	peerMac net.HardwareAddr, vtep net.IP, l2Miss, l3Miss, updateDB, localPeer bool) error {
 
 	if err := validateID(nid, eid); err != nil {
 		return err
 	}
 
+	var dbEntries int
+	var inserted bool
 	if updateDB {
-		d.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, false)
-		if updateOnlyDB {
-			return nil
+		inserted, dbEntries = d.peerDbAdd(nid, eid, peerIP, peerIPMask, peerMac, vtep, localPeer)
+		if !inserted {
+			logrus.Warnf("Entry already present in db: nid:%s eid:%s peerIP:%v peerMac:%v isLocal:%t vtep:%v",
+				nid, eid, peerIP, peerMac, localPeer, vtep)
 		}
 	}
 
+	// Local peers do not need any further configuration
+	if localPeer {
+		return nil
+	}
+
 	n := d.network(nid)
 	if n == nil {
 		return nil
@@ -353,21 +394,26 @@
 
 	// Add neighbor entry for the peer IP
 	if err := sbox.AddNeighbor(peerIP, peerMac, l3Miss, sbox.NeighborOptions().LinkName(s.vxlanName)); err != nil {
-		return fmt.Errorf("could not add neighbor entry into the sandbox: %v", err)
+		if _, ok := err.(osl.NeighborSearchError); ok && dbEntries > 1 {
+			// We are in the transient case so only the first configuration is programmed into the kernel
+			// Upon deletion if the active configuration is deleted the next one from the database will be restored
+			// Note we are skipping also the next configuration
+			return nil
+		}
+		return fmt.Errorf("could not add neighbor entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
 	}
 
 	// Add fdb entry to the bridge for the peer mac
 	if err := sbox.AddNeighbor(vtep, peerMac, l2Miss, sbox.NeighborOptions().LinkName(s.vxlanName),
 		sbox.NeighborOptions().Family(syscall.AF_BRIDGE)); err != nil {
-		return fmt.Errorf("could not add fdb entry into the sandbox: %v", err)
+		return fmt.Errorf("could not add fdb entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
 	}
 
 	return nil
 }
 
 func (d *driver) peerDelete(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
-	peerMac net.HardwareAddr, vtep net.IP) {
-	callerName := common.CallerName(1)
+	peerMac net.HardwareAddr, vtep net.IP, localPeer bool) {
 	d.peerOpCh <- &peerOperation{
 		opType:     peerOperationDELETE,
 		networkID:  nid,
@@ -376,18 +422,23 @@
 		peerIPMask: peerIPMask,
 		peerMac:    peerMac,
 		vtepIP:     vtep,
-		callerName: callerName,
+		callerName: common.CallerName(1),
+		localPeer:  localPeer,
 	}
 }
 
 func (d *driver) peerDeleteOp(nid, eid string, peerIP net.IP, peerIPMask net.IPMask,
-	peerMac net.HardwareAddr, vtep net.IP) error {
+	peerMac net.HardwareAddr, vtep net.IP, localPeer bool) error {
 
 	if err := validateID(nid, eid); err != nil {
 		return err
 	}
 
-	pEntry := d.peerDbDelete(nid, eid, peerIP, peerIPMask, peerMac, vtep)
+	deleted, dbEntries := d.peerDbDelete(nid, eid, peerIP, peerIPMask, peerMac, vtep, localPeer)
+	if !deleted {
+		logrus.Warnf("Entry was not in db: nid:%s eid:%s peerIP:%v peerMac:%v isLocal:%t vtep:%v",
+			nid, eid, peerIP, peerMac, localPeer, vtep)
+	}
 
 	n := d.network(nid)
 	if n == nil {
@@ -399,30 +450,59 @@
 		return nil
 	}
 
-	// Delete fdb entry to the bridge for the peer mac only if the
-	// entry existed in local peerdb. If it is a stale delete
-	// request, still call DeleteNeighbor but only to cleanup any
-	// leftover sandbox neighbor cache and not actually delete the
-	// kernel state.
-	if (eid == pEntry.eid && vtep.Equal(pEntry.vtep)) ||
-		(eid != pEntry.eid && !vtep.Equal(pEntry.vtep)) {
-		if err := sbox.DeleteNeighbor(vtep, peerMac,
-			eid == pEntry.eid && vtep.Equal(pEntry.vtep)); err != nil {
-			return fmt.Errorf("could not delete fdb entry into the sandbox: %v", err)
-		}
-	}
-
-	// Delete neighbor entry for the peer IP
-	if eid == pEntry.eid {
-		if err := sbox.DeleteNeighbor(peerIP, peerMac, true); err != nil {
-			return fmt.Errorf("could not delete neighbor entry into the sandbox: %v", err)
-		}
-	}
-
-	if err := d.checkEncryption(nid, vtep, 0, false, false); err != nil {
+	if err := d.checkEncryption(nid, vtep, 0, localPeer, false); err != nil {
 		logrus.Warn(err)
 	}
 
+	// Local peers do not have any local configuration to delete
+	if !localPeer {
+		// Remove fdb entry to the bridge for the peer mac
+		if err := sbox.DeleteNeighbor(vtep, peerMac, true); err != nil {
+			if _, ok := err.(osl.NeighborSearchError); ok && dbEntries > 0 {
+				// We fall in here if there is a transient state and if the neighbor that is being deleted
+				// was never been configured into the kernel (we allow only 1 configuration at the time per <ip,mac> mapping)
+				return nil
+			}
+			return fmt.Errorf("could not delete fdb entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
+		}
+
+		// Delete neighbor entry for the peer IP
+		if err := sbox.DeleteNeighbor(peerIP, peerMac, true); err != nil {
+			return fmt.Errorf("could not delete neighbor entry for nid:%s eid:%s into the sandbox:%v", nid, eid, err)
+		}
+	}
+
+	if dbEntries == 0 {
+		return nil
+	}
+
+	// If there is still an entry into the database and the deletion went through without errors means that there is now no
+	// configuration active in the kernel.
+	// Restore one configuration for the <ip,mac> directly from the database, note that is guaranteed that there is one
+	peerKey, peerEntry, err := d.peerDbSearch(nid, peerIP)
+	if err != nil {
+		logrus.Errorf("peerDeleteOp unable to restore a configuration for nid:%s ip:%v mac:%v err:%s", nid, peerIP, peerMac, err)
+		return err
+	}
+	return d.peerAddOp(nid, peerEntry.eid, peerIP, peerEntry.peerIPMask, peerKey.peerMac, peerEntry.vtep, false, false, false, peerEntry.isLocal)
+}
+
+func (d *driver) peerFlush(nid string) {
+	d.peerOpCh <- &peerOperation{
+		opType:     peerOperationFLUSH,
+		networkID:  nid,
+		callerName: common.CallerName(1),
+	}
+}
+
+func (d *driver) peerFlushOp(nid string) error {
+	d.peerDb.Lock()
+	defer d.peerDb.Unlock()
+	_, ok := d.peerDb.mp[nid]
+	if !ok {
+		return fmt.Errorf("Unable to find the peerDB for nid:%s", nid)
+	}
+	delete(d.peerDb.mp, nid)
 	return nil
 }
 
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_endpoint.go b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_endpoint.go
index 21b33ff..9df7a58 100644
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_endpoint.go
+++ b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_endpoint.go
@@ -134,11 +134,7 @@
 		return fmt.Errorf("overlay local store not initialized, ep not deleted")
 	}
 
-	if err := d.localStore.DeleteObjectAtomic(e); err != nil {
-		return err
-	}
-
-	return nil
+	return d.localStore.DeleteObjectAtomic(e)
 }
 
 func (d *driver) writeEndpointToStore(e *endpoint) error {
@@ -146,10 +142,7 @@
 		return fmt.Errorf("overlay local store not initialized, ep not added")
 	}
 
-	if err := d.localStore.PutObjectAtomic(e); err != nil {
-		return err
-	}
-	return nil
+	return d.localStore.PutObjectAtomic(e)
 }
 
 func (ep *endpoint) DataScope() string {
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go
index b545bc8..039dbd5 100644
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go
+++ b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/ov_network.go
@@ -331,7 +331,7 @@
 	brIfaceOption := make([]osl.IfaceOption, 2)
 	brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Address(s.gwIP))
 	brIfaceOption = append(brIfaceOption, sbox.InterfaceOptions().Bridge(true))
-	Ifaces[fmt.Sprintf("%s+%s", brName, "br")] = brIfaceOption
+	Ifaces[brName+"+br"] = brIfaceOption
 
 	err := sbox.Restore(Ifaces, nil, nil, nil)
 	if err != nil {
@@ -341,7 +341,7 @@
 	Ifaces = make(map[string][]osl.IfaceOption)
 	vxlanIfaceOption := make([]osl.IfaceOption, 1)
 	vxlanIfaceOption = append(vxlanIfaceOption, sbox.InterfaceOptions().Master(brName))
-	Ifaces[fmt.Sprintf("%s+%s", vxlanName, "vxlan")] = vxlanIfaceOption
+	Ifaces[vxlanName+"+vxlan"] = vxlanIfaceOption
 	err = sbox.Restore(Ifaces, nil, nil, nil)
 	if err != nil {
 		return err
@@ -718,7 +718,7 @@
 		}
 
 		if s.vni == 0 {
-			vxlanID, err := n.driver.vxlanIdm.GetID()
+			vxlanID, err := n.driver.vxlanIdm.GetID(true)
 			if err != nil {
 				return fmt.Errorf("failed to allocate vxlan id: %v", err)
 			}
diff --git a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go
index 45b62b1..92b0a4e 100644
--- a/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go
+++ b/vendor/github.com/docker/libnetwork/drivers/solaris/overlay/overlay.go
@@ -141,7 +141,7 @@
 		Ifaces := make(map[string][]osl.IfaceOption)
 		vethIfaceOption := make([]osl.IfaceOption, 1)
 		vethIfaceOption = append(vethIfaceOption, n.sbox.InterfaceOptions().Master(s.brName))
-		Ifaces[fmt.Sprintf("%s+%s", "veth", "veth")] = vethIfaceOption
+		Ifaces["veth+veth"] = vethIfaceOption
 
 		err := n.sbox.Restore(Ifaces, nil, nil, nil)
 		if err != nil {
@@ -234,7 +234,7 @@
 		// If there is no cluster store there is no need to start serf.
 		if d.store != nil {
 			if err := validateSelf(advertiseAddress); err != nil {
-				logrus.Warnf("%s", err.Error())
+				logrus.Warn(err.Error())
 			}
 			err := d.serfInit()
 			if err != nil {
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go
index cded48a..83bee5a 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/joinleave_windows.go
@@ -39,6 +39,11 @@
 	if err := jinfo.AddTableEntry(ovPeerTable, eid, buf); err != nil {
 		logrus.Errorf("overlay: Failed adding table entry to joininfo: %v", err)
 	}
+
+	if ep.disablegateway {
+		jinfo.DisableGatewayService()
+	}
+
 	return nil
 }
 
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go
index 47af64c..b7bda4a 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_endpoint_windows.go
@@ -6,7 +6,11 @@
 	"net"
 
 	"github.com/Microsoft/hcsshim"
+	"github.com/docker/docker/pkg/system"
 	"github.com/docker/libnetwork/driverapi"
+	"github.com/docker/libnetwork/drivers/windows"
+	"github.com/docker/libnetwork/netlabel"
+	"github.com/docker/libnetwork/types"
 	"github.com/sirupsen/logrus"
 )
 
@@ -15,12 +19,14 @@
 const overlayEndpointPrefix = "overlay/endpoint"
 
 type endpoint struct {
-	id        string
-	nid       string
-	profileId string
-	remote    bool
-	mac       net.HardwareAddr
-	addr      *net.IPNet
+	id             string
+	nid            string
+	profileID      string
+	remote         bool
+	mac            net.HardwareAddr
+	addr           *net.IPNet
+	disablegateway bool
+	portMapping    []types.PortBinding // Operation port bindings
 }
 
 func validateID(nid, eid string) error {
@@ -71,7 +77,7 @@
 
 	if networkEndpoint != nil {
 		logrus.Debugf("Removing stale endpoint from HNS")
-		_, err := hcsshim.HNSEndpointRequest("DELETE", networkEndpoint.profileId, "")
+		_, err := hcsshim.HNSEndpointRequest("DELETE", networkEndpoint.profileID, "")
 
 		if err != nil {
 			logrus.Debugf("Failed to delete stale overlay endpoint (%s) from hns", networkEndpoint.id[0:7])
@@ -96,7 +102,7 @@
 		logrus.Debugf("Deleting stale endpoint %s", eid)
 		n.deleteEndpoint(eid)
 
-		_, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileId, "")
+		_, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileID, "")
 		if err != nil {
 			return err
 		}
@@ -113,17 +119,19 @@
 		return fmt.Errorf("create endpoint was not passed interface IP address")
 	}
 
-	if s := n.getSubnetforIP(ep.addr); s == nil {
-		return fmt.Errorf("no matching subnet for IP %q in network %q\n", ep.addr, nid)
+	s := n.getSubnetforIP(ep.addr)
+	if s == nil {
+		return fmt.Errorf("no matching subnet for IP %q in network %q", ep.addr, nid)
 	}
 
 	// Todo: Add port bindings and qos policies here
 
 	hnsEndpoint := &hcsshim.HNSEndpoint{
 		Name:              eid,
-		VirtualNetwork:    n.hnsId,
+		VirtualNetwork:    n.hnsID,
 		IPAddress:         ep.addr.IP,
 		EnableInternalDNS: true,
+		GatewayAddress:    s.gwIP.String(),
 	}
 
 	if ep.mac != nil {
@@ -141,6 +149,31 @@
 
 	hnsEndpoint.Policies = append(hnsEndpoint.Policies, paPolicy)
 
+	if system.GetOSVersion().Build > 16236 {
+		natPolicy, err := json.Marshal(hcsshim.PaPolicy{
+			Type: "OutBoundNAT",
+		})
+
+		if err != nil {
+			return err
+		}
+
+		hnsEndpoint.Policies = append(hnsEndpoint.Policies, natPolicy)
+
+		epConnectivity, err := windows.ParseEndpointConnectivity(epOptions)
+		if err != nil {
+			return err
+		}
+
+		pbPolicy, err := windows.ConvertPortBindings(epConnectivity.PortBindings)
+		if err != nil {
+			return err
+		}
+		hnsEndpoint.Policies = append(hnsEndpoint.Policies, pbPolicy...)
+
+		ep.disablegateway = true
+	}
+
 	configurationb, err := json.Marshal(hnsEndpoint)
 	if err != nil {
 		return err
@@ -151,7 +184,7 @@
 		return err
 	}
 
-	ep.profileId = hnsresponse.Id
+	ep.profileID = hnsresponse.Id
 
 	if ep.mac == nil {
 		ep.mac, err = net.ParseMAC(hnsresponse.MacAddress)
@@ -164,6 +197,12 @@
 		}
 	}
 
+	ep.portMapping, err = windows.ParsePortBindingPolicies(hnsresponse.Policies)
+	if err != nil {
+		hcsshim.HNSEndpointRequest("DELETE", hnsresponse.Id, "")
+		return err
+	}
+
 	n.addEndpoint(ep)
 
 	return nil
@@ -186,7 +225,7 @@
 
 	n.deleteEndpoint(eid)
 
-	_, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileId, "")
+	_, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileID, "")
 	if err != nil {
 		return err
 	}
@@ -210,7 +249,17 @@
 	}
 
 	data := make(map[string]interface{}, 1)
-	data["hnsid"] = ep.profileId
+	data["hnsid"] = ep.profileID
 	data["AllowUnqualifiedDNSQuery"] = true
+
+	if ep.portMapping != nil {
+		// Return a copy of the operational data
+		pmc := make([]types.PortBinding, 0, len(ep.portMapping))
+		for _, pm := range ep.portMapping {
+			pmc = append(pmc, pm.GetCopy())
+		}
+		data[netlabel.PortMap] = pmc
+	}
+
 	return data, nil
 }
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go
index 70c4f02..9cc46f8 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/ov_network_windows.go
@@ -37,7 +37,7 @@
 type network struct {
 	id              string
 	name            string
-	hnsId           string
+	hnsID           string
 	providerAddress string
 	interfaceName   string
 	endpoints       endpointTable
@@ -108,7 +108,7 @@
 		case "com.docker.network.windowsshim.interface":
 			interfaceName = value
 		case "com.docker.network.windowsshim.hnsid":
-			n.hnsId = value
+			n.hnsID = value
 		case netlabel.OverlayVxlanIDList:
 			vniStrings := strings.Split(value, ",")
 			for _, vniStr := range vniStrings {
@@ -181,7 +181,7 @@
 	if err != nil {
 		d.deleteNetwork(id)
 	} else {
-		genData["com.docker.network.windowsshim.hnsid"] = n.hnsId
+		genData["com.docker.network.windowsshim.hnsid"] = n.hnsID
 	}
 
 	return err
@@ -197,7 +197,7 @@
 		return types.ForbiddenErrorf("could not find network with id %s", nid)
 	}
 
-	_, err := hcsshim.HNSNetworkRequest("DELETE", n.hnsId, "")
+	_, err := hcsshim.HNSNetworkRequest("DELETE", n.hnsID, "")
 	if err != nil {
 		return types.ForbiddenErrorf(err.Error())
 	}
@@ -242,7 +242,7 @@
 // 	}
 
 // 	for _, endpoint := range hnsresponse {
-// 		if endpoint.VirtualNetwork != n.hnsId {
+// 		if endpoint.VirtualNetwork != n.hnsID {
 // 			continue
 // 		}
 
@@ -260,7 +260,7 @@
 func (n *network) convertToOverlayEndpoint(v *hcsshim.HNSEndpoint) *endpoint {
 	ep := &endpoint{
 		id:        v.Name,
-		profileId: v.Id,
+		profileID: v.Id,
 		nid:       n.id,
 		remote:    v.IsRemoteEndpoint,
 	}
@@ -311,6 +311,7 @@
 		Type:               d.Type(),
 		Subnets:            subnets,
 		NetworkAdapterName: n.interfaceName,
+		AutomaticDNS:       true,
 	}
 
 	configurationb, err := json.Marshal(network)
@@ -326,7 +327,7 @@
 		return err
 	}
 
-	n.hnsId = hnsresponse.Id
+	n.hnsID = hnsresponse.Id
 	n.providerAddress = hnsresponse.ManagementIP
 
 	return nil
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go
index d415beb..65ad62a 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/overlay_windows.go
@@ -104,7 +104,7 @@
 func (d *driver) convertToOverlayNetwork(v *hcsshim.HNSNetwork) *network {
 	n := &network{
 		id:              v.Name,
-		hnsId:           v.Id,
+		hnsID:           v.Id,
 		driver:          d,
 		endpoints:       endpointTable{},
 		subnets:         []*subnet{},
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go
index 0abc232..159bfd6 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/overlay/peerdb_windows.go
@@ -33,7 +33,7 @@
 
 		hnsEndpoint := &hcsshim.HNSEndpoint{
 			Name:             eid,
-			VirtualNetwork:   n.hnsId,
+			VirtualNetwork:   n.hnsID,
 			MacAddress:       peerMac.String(),
 			IPAddress:        peerIP,
 			IsRemoteEndpoint: true,
@@ -78,7 +78,7 @@
 			nid:       nid,
 			addr:      addr,
 			mac:       peerMac,
-			profileId: hnsresponse.Id,
+			profileID: hnsresponse.Id,
 			remote:    true,
 		}
 
@@ -108,7 +108,7 @@
 	}
 
 	if updateDb {
-		_, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileId, "")
+		_, err := hcsshim.HNSEndpointRequest("DELETE", ep.profileID, "")
 		if err != nil {
 			return err
 		}
diff --git a/vendor/github.com/docker/libnetwork/drivers/windows/windows.go b/vendor/github.com/docker/libnetwork/drivers/windows/windows.go
index 45f835e..4a03e72 100644
--- a/vendor/github.com/docker/libnetwork/drivers/windows/windows.go
+++ b/vendor/github.com/docker/libnetwork/drivers/windows/windows.go
@@ -55,19 +55,27 @@
 	DisableICC  bool
 }
 
-type endpointConnectivity struct {
+// EndpointConnectivity stores the port bindings and exposed ports that the user has specified in epOptions.
+type EndpointConnectivity struct {
 	PortBindings []types.PortBinding
 	ExposedPorts []types.TransportPort
 }
 
 type hnsEndpoint struct {
-	id             string
-	nid            string
-	profileID      string
-	Type           string
+	id        string
+	nid       string
+	profileID string
+	Type      string
+	//Note: Currently, the sandboxID is the same as the containerID since windows does
+	//not expose the sandboxID.
+	//In the future, windows will support a proper sandboxID that is different
+	//than the containerID.
+	//Therefore, we are using sandboxID now, so that we won't have to change this code
+	//when windows properly supports a sandboxID.
+	sandboxID      string
 	macAddress     net.HardwareAddr
 	epOption       *endpointOption       // User specified parameters
-	epConnectivity *endpointConnectivity // User specified parameters
+	epConnectivity *EndpointConnectivity // User specified parameters
 	portMapping    []types.PortBinding   // Operation port bindings
 	addr           *net.IPNet
 	gateway        net.IP
@@ -95,7 +103,7 @@
 	errNotFound = "HNS failed with error : The object identifier does not represent a valid object. "
 )
 
-// IsBuiltinWindowsDriver vaidates if network-type is a builtin local-scoped driver
+// IsBuiltinLocalDriver validates if network-type is a builtin local-scoped driver
 func IsBuiltinLocalDriver(networkType string) bool {
 	if "l2bridge" == networkType || "l2tunnel" == networkType || "nat" == networkType || "ics" == networkType || "transparent" == networkType {
 		return true
@@ -396,7 +404,8 @@
 	return qps, nil
 }
 
-func convertPortBindings(portBindings []types.PortBinding) ([]json.RawMessage, error) {
+// ConvertPortBindings converts PortBindings to JSON for HNS request
+func ConvertPortBindings(portBindings []types.PortBinding) ([]json.RawMessage, error) {
 	var pbs []json.RawMessage
 
 	// Enumerate through the port bindings specified by the user and convert
@@ -431,7 +440,8 @@
 	return pbs, nil
 }
 
-func parsePortBindingPolicies(policies []json.RawMessage) ([]types.PortBinding, error) {
+// ParsePortBindingPolicies parses HNS endpoint response message to PortBindings
+func ParsePortBindingPolicies(policies []json.RawMessage) ([]types.PortBinding, error) {
 	var bindings []types.PortBinding
 	hcsPolicy := &hcsshim.NatPolicy{}
 
@@ -505,12 +515,13 @@
 	return ec, nil
 }
 
-func parseEndpointConnectivity(epOptions map[string]interface{}) (*endpointConnectivity, error) {
+// ParseEndpointConnectivity parses options passed to CreateEndpoint, specifically port bindings, and store in a endpointConnectivity object.
+func ParseEndpointConnectivity(epOptions map[string]interface{}) (*EndpointConnectivity, error) {
 	if epOptions == nil {
 		return nil, nil
 	}
 
-	ec := &endpointConnectivity{}
+	ec := &EndpointConnectivity{}
 
 	if opt, ok := epOptions[netlabel.PortMap]; ok {
 		if bs, ok := opt.([]types.PortBinding); ok {
@@ -550,7 +561,7 @@
 	if err != nil {
 		return err
 	}
-	epConnectivity, err := parseEndpointConnectivity(epOptions)
+	epConnectivity, err := ParseEndpointConnectivity(epOptions)
 	if err != nil {
 		return err
 	}
@@ -561,7 +572,7 @@
 		endpointStruct.MacAddress = strings.Replace(macAddress.String(), ":", "-", -1)
 	}
 
-	endpointStruct.Policies, err = convertPortBindings(epConnectivity.PortBindings)
+	endpointStruct.Policies, err = ConvertPortBindings(epConnectivity.PortBindings)
 	if err != nil {
 		return err
 	}
@@ -615,7 +626,7 @@
 	endpoint.profileID = hnsresponse.Id
 	endpoint.epConnectivity = epConnectivity
 	endpoint.epOption = epOption
-	endpoint.portMapping, err = parsePortBindingPolicies(hnsresponse.Policies)
+	endpoint.portMapping, err = ParsePortBindingPolicies(hnsresponse.Policies)
 
 	if err != nil {
 		hcsshim.HNSEndpointRequest("DELETE", hnsresponse.Id, "")
@@ -635,7 +646,7 @@
 	}
 
 	if err = d.storeUpdate(endpoint); err != nil {
-		return fmt.Errorf("failed to save endpoint %s to store: %v", endpoint.id[0:7], err)
+		logrus.Errorf("Failed to save endpoint %s to store: %v", endpoint.id[0:7], err)
 	}
 
 	return nil
@@ -726,7 +737,15 @@
 		return err
 	}
 
-	// This is just a stub for now
+	endpoint.sandboxID = sboxKey
+
+	err = hcsshim.HotAttachEndpoint(endpoint.sandboxID, endpoint.profileID)
+	if err != nil {
+		// If container doesn't exists in hcs, do not throw error for hot add/remove
+		if err != hcsshim.ErrComputeSystemDoesNotExist {
+			return err
+		}
+	}
 
 	jinfo.DisableGatewayService()
 	return nil
@@ -740,13 +759,18 @@
 	}
 
 	// Ensure that the endpoint exists
-	_, err = network.getEndpoint(eid)
+	endpoint, err := network.getEndpoint(eid)
 	if err != nil {
 		return err
 	}
 
-	// This is just a stub for now
-
+	err = hcsshim.HotDetachEndpoint(endpoint.sandboxID, endpoint.profileID)
+	if err != nil {
+		// If container doesn't exists in hcs, do not throw error for hot add/remove
+		if err != hcsshim.ErrComputeSystemDoesNotExist {
+			return err
+		}
+	}
 	return nil
 }
 
diff --git a/vendor/github.com/docker/libnetwork/endpoint.go b/vendor/github.com/docker/libnetwork/endpoint.go
index 724d0e5..a2d1dbc 100644
--- a/vendor/github.com/docker/libnetwork/endpoint.go
+++ b/vendor/github.com/docker/libnetwork/endpoint.go
@@ -75,6 +75,7 @@
 	dbIndex           uint64
 	dbExists          bool
 	serviceEnabled    bool
+	loadBalancer      bool
 	sync.Mutex
 }
 
@@ -101,6 +102,7 @@
 	epMap["virtualIP"] = ep.virtualIP.String()
 	epMap["ingressPorts"] = ep.ingressPorts
 	epMap["svcAliases"] = ep.svcAliases
+	epMap["loadBalancer"] = ep.loadBalancer
 
 	return json.Marshal(epMap)
 }
@@ -201,6 +203,10 @@
 		ep.virtualIP = net.ParseIP(vip.(string))
 	}
 
+	if v, ok := epMap["loadBalancer"]; ok {
+		ep.loadBalancer = v.(bool)
+	}
+
 	sal, _ := json.Marshal(epMap["svcAliases"])
 	var svcAliases []string
 	json.Unmarshal(sal, &svcAliases)
@@ -238,6 +244,7 @@
 	dstEp.svcName = ep.svcName
 	dstEp.svcID = ep.svcID
 	dstEp.virtualIP = ep.virtualIP
+	dstEp.loadBalancer = ep.loadBalancer
 
 	dstEp.svcAliases = make([]string, len(ep.svcAliases))
 	copy(dstEp.svcAliases, ep.svcAliases)
@@ -985,7 +992,7 @@
 	}
 }
 
-//CreateOptionAlias function returns an option setter for setting endpoint alias
+// CreateOptionAlias function returns an option setter for setting endpoint alias
 func CreateOptionAlias(name string, alias string) EndpointOption {
 	return func(ep *endpoint) {
 		if ep.aliases == nil {
@@ -1006,13 +1013,20 @@
 	}
 }
 
-//CreateOptionMyAlias function returns an option setter for setting endpoint's self alias
+// CreateOptionMyAlias function returns an option setter for setting endpoint's self alias
 func CreateOptionMyAlias(alias string) EndpointOption {
 	return func(ep *endpoint) {
 		ep.myAliases = append(ep.myAliases, alias)
 	}
 }
 
+// CreateOptionLoadBalancer function returns an option setter for denoting the endpoint is a load balancer for a network
+func CreateOptionLoadBalancer() EndpointOption {
+	return func(ep *endpoint) {
+		ep.loadBalancer = true
+	}
+}
+
 // JoinOptionPriority function returns an option setter for priority option to
 // be passed to the endpoint.Join() method.
 func JoinOptionPriority(ep Endpoint, prio int) EndpointOption {
diff --git a/vendor/github.com/docker/libnetwork/endpoint_info.go b/vendor/github.com/docker/libnetwork/endpoint_info.go
index 47bff43..68d3e86 100644
--- a/vendor/github.com/docker/libnetwork/endpoint_info.go
+++ b/vendor/github.com/docker/libnetwork/endpoint_info.go
@@ -31,6 +31,9 @@
 
 	// Sandbox returns the attached sandbox if there, nil otherwise.
 	Sandbox() Sandbox
+
+	// LoadBalancer returns whether the endpoint is the load balancer endpoint for the network.
+	LoadBalancer() bool
 }
 
 // InterfaceInfo provides an interface to retrieve interface addresses bound to the endpoint.
@@ -199,11 +202,7 @@
 		return ep
 	}
 
-	if epi := sb.getEndpoint(ep.ID()); epi != nil {
-		return epi
-	}
-
-	return nil
+	return sb.getEndpoint(ep.ID())
 }
 
 func (ep *endpoint) Iface() InterfaceInfo {
@@ -327,6 +326,12 @@
 	return cnt
 }
 
+func (ep *endpoint) LoadBalancer() bool {
+	ep.Lock()
+	defer ep.Unlock()
+	return ep.loadBalancer
+}
+
 func (ep *endpoint) StaticRoutes() []*types.StaticRoute {
 	ep.Lock()
 	defer ep.Unlock()
diff --git a/vendor/github.com/docker/libnetwork/idm/idm.go b/vendor/github.com/docker/libnetwork/idm/idm.go
index 7e449a0..d5843d4 100644
--- a/vendor/github.com/docker/libnetwork/idm/idm.go
+++ b/vendor/github.com/docker/libnetwork/idm/idm.go
@@ -34,11 +34,11 @@
 }
 
 // GetID returns the first available id in the set
-func (i *Idm) GetID() (uint64, error) {
+func (i *Idm) GetID(serial bool) (uint64, error) {
 	if i.handle == nil {
 		return 0, errors.New("ID set is not initialized")
 	}
-	ordinal, err := i.handle.SetAny()
+	ordinal, err := i.handle.SetAny(serial)
 	return i.start + ordinal, err
 }
 
@@ -56,7 +56,7 @@
 }
 
 // GetIDInRange returns the first available id in the set within a [start,end] range
-func (i *Idm) GetIDInRange(start, end uint64) (uint64, error) {
+func (i *Idm) GetIDInRange(start, end uint64, serial bool) (uint64, error) {
 	if i.handle == nil {
 		return 0, errors.New("ID set is not initialized")
 	}
@@ -65,7 +65,7 @@
 		return 0, errors.New("Requested range does not belong to the set")
 	}
 
-	ordinal, err := i.handle.SetAnyInRange(start-i.start, end-i.start)
+	ordinal, err := i.handle.SetAnyInRange(start-i.start, end-i.start, serial)
 
 	return i.start + ordinal, err
 }
diff --git a/vendor/github.com/docker/libnetwork/ipam/allocator.go b/vendor/github.com/docker/libnetwork/ipam/allocator.go
index 71c9f39..5beb429 100644
--- a/vendor/github.com/docker/libnetwork/ipam/allocator.go
+++ b/vendor/github.com/docker/libnetwork/ipam/allocator.go
@@ -457,7 +457,15 @@
 		return nil, nil, types.InternalErrorf("could not find bitmask in datastore for %s on address %v request from pool %s: %v",
 			k.String(), prefAddress, poolID, err)
 	}
-	ip, err := a.getAddress(p.Pool, bm, prefAddress, p.Range)
+	// In order to request for a serial ip address allocation, callers can pass in the option to request
+	// IP allocation serially or first available IP in the subnet
+	var serial bool
+	if opts != nil {
+		if val, ok := opts[ipamapi.AllocSerialPrefix]; ok {
+			serial = (val == "true")
+		}
+	}
+	ip, err := a.getAddress(p.Pool, bm, prefAddress, p.Range, serial)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -522,7 +530,7 @@
 	return bm.Unset(ipToUint64(h))
 }
 
-func (a *Allocator) getAddress(nw *net.IPNet, bitmask *bitseq.Handle, prefAddress net.IP, ipr *AddressRange) (net.IP, error) {
+func (a *Allocator) getAddress(nw *net.IPNet, bitmask *bitseq.Handle, prefAddress net.IP, ipr *AddressRange, serial bool) (net.IP, error) {
 	var (
 		ordinal uint64
 		err     error
@@ -535,7 +543,7 @@
 		return nil, ipamapi.ErrNoAvailableIPs
 	}
 	if ipr == nil && prefAddress == nil {
-		ordinal, err = bitmask.SetAny()
+		ordinal, err = bitmask.SetAny(serial)
 	} else if prefAddress != nil {
 		hostPart, e := types.GetHostPartIP(prefAddress, base.Mask)
 		if e != nil {
@@ -544,7 +552,7 @@
 		ordinal = ipToUint64(types.GetMinimalIP(hostPart))
 		err = bitmask.Set(ordinal)
 	} else {
-		ordinal, err = bitmask.SetAnyInRange(ipr.Start, ipr.End)
+		ordinal, err = bitmask.SetAnyInRange(ipr.Start, ipr.End, serial)
 	}
 
 	switch err {
@@ -579,7 +587,7 @@
 		s = fmt.Sprintf("\n\n%s Config", as)
 		aSpace.Lock()
 		for k, config := range aSpace.subnets {
-			s = fmt.Sprintf("%s%s", s, fmt.Sprintf("\n%v: %v", k, config))
+			s += fmt.Sprintf("\n%v: %v", k, config)
 			if config.Range == nil {
 				a.retrieveBitmask(k, config.Pool)
 			}
@@ -589,7 +597,7 @@
 
 	s = fmt.Sprintf("%s\n\nBitmasks", s)
 	for k, bm := range a.addresses {
-		s = fmt.Sprintf("%s%s", s, fmt.Sprintf("\n%s: %s", k, bm))
+		s += fmt.Sprintf("\n%s: %s", k, bm)
 	}
 
 	return s
diff --git a/vendor/github.com/docker/libnetwork/ipamapi/labels.go b/vendor/github.com/docker/libnetwork/ipamapi/labels.go
new file mode 100644
index 0000000..e5c7d1c
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/ipamapi/labels.go
@@ -0,0 +1,10 @@
+package ipamapi
+
+const (
+	// Prefix constant marks the reserved label space for libnetwork
+	Prefix = "com.docker.network"
+
+	// AllocSerialPrefix constant marks the reserved label space for libnetwork ipam
+	// allocation ordering.(serial/first available)
+	AllocSerialPrefix = Prefix + ".ipam.serial"
+)
diff --git a/vendor/github.com/docker/libnetwork/iptables/iptables.go b/vendor/github.com/docker/libnetwork/iptables/iptables.go
index ecce0d3..5518fcb 100644
--- a/vendor/github.com/docker/libnetwork/iptables/iptables.go
+++ b/vendor/github.com/docker/libnetwork/iptables/iptables.go
@@ -276,11 +276,7 @@
 		"--dport", strconv.Itoa(destPort),
 		"-j", "MASQUERADE",
 	}
-	if err := ProgramRule(Nat, "POSTROUTING", action, args); err != nil {
-		return err
-	}
-
-	return nil
+	return ProgramRule(Nat, "POSTROUTING", action, args)
 }
 
 // Link adds reciprocal ACCEPT rule for two supplied IP addresses.
@@ -301,10 +297,7 @@
 	// reverse
 	args[7], args[9] = args[9], args[7]
 	args[10] = "--sport"
-	if err := ProgramRule(Filter, c.Name, action, args); err != nil {
-		return err
-	}
-	return nil
+	return ProgramRule(Filter, c.Name, action, args)
 }
 
 // ProgramRule adds the rule specified by args only if the
@@ -463,7 +456,7 @@
 
 // ExistChain checks if a chain exists
 func ExistChain(chain string, table Table) bool {
-	if _, err := Raw("-t", string(table), "-L", chain); err == nil {
+	if _, err := Raw("-t", string(table), "-nL", chain); err == nil {
 		return true
 	}
 	return false
diff --git a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go b/vendor/github.com/docker/libnetwork/ipvs/ipvs.go
index a285e10..ebcdd80 100644
--- a/vendor/github.com/docker/libnetwork/ipvs/ipvs.go
+++ b/vendor/github.com/docker/libnetwork/ipvs/ipvs.go
@@ -116,6 +116,13 @@
 	return i.doCmd(s, nil, ipvsCmdDelService)
 }
 
+// Flush deletes all existing services in the passed
+// handle.
+func (i *Handle) Flush() error {
+	_, err := i.doCmdWithoutAttr(ipvsCmdFlush)
+	return err
+}
+
 // NewDestination creates a new real server in the passed ipvs
 // service which should already be existing in the passed handle.
 func (i *Handle) NewDestination(s *Service, d *Destination) error {
diff --git a/vendor/github.com/docker/libnetwork/ipvs/netlink.go b/vendor/github.com/docker/libnetwork/ipvs/netlink.go
index b8d33dc..2089283 100644
--- a/vendor/github.com/docker/libnetwork/ipvs/netlink.go
+++ b/vendor/github.com/docker/libnetwork/ipvs/netlink.go
@@ -402,6 +402,13 @@
 	return res, nil
 }
 
+// doCmdWithoutAttr a simple wrapper of netlink socket execute command
+func (i *Handle) doCmdWithoutAttr(cmd uint8) ([][]byte, error) {
+	req := newIPVSRequest(cmd)
+	req.Seq = atomic.AddUint32(&i.seq, 1)
+	return execute(i.sock, req, 0)
+}
+
 func assembleDestination(attrs []syscall.NetlinkRouteAttr) (*Destination, error) {
 
 	var d Destination
diff --git a/vendor/github.com/docker/libnetwork/network.go b/vendor/github.com/docker/libnetwork/network.go
index 3f44553..1ad4706 100644
--- a/vendor/github.com/docker/libnetwork/network.go
+++ b/vendor/github.com/docker/libnetwork/network.go
@@ -995,6 +995,10 @@
 		logrus.Errorf("Failed leaving network %s from the agent cluster: %v", n.Name(), err)
 	}
 
+	// Cleanup the service discovery for this network
+	c.cleanupServiceDiscovery(n.ID())
+
+	// Cleanup the load balancer
 	c.cleanupServiceBindings(n.ID())
 
 removeFromStore:
diff --git a/vendor/github.com/docker/libnetwork/networkdb/broadcast.go b/vendor/github.com/docker/libnetwork/networkdb/broadcast.go
index 52e96ec..174023b 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/broadcast.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/broadcast.go
@@ -32,7 +32,7 @@
 	nEvent := NetworkEvent{
 		Type:      event,
 		LTime:     ltime,
-		NodeName:  nDB.config.NodeName,
+		NodeName:  nDB.config.NodeID,
 		NetworkID: nid,
 	}
 
@@ -44,7 +44,7 @@
 	nDB.networkBroadcasts.QueueBroadcast(&networkEventMessage{
 		msg:  raw,
 		id:   nid,
-		node: nDB.config.NodeName,
+		node: nDB.config.NodeID,
 	})
 	return nil
 }
@@ -72,7 +72,7 @@
 	nEvent := NodeEvent{
 		Type:     event,
 		LTime:    nDB.networkClock.Increment(),
-		NodeName: nDB.config.NodeName,
+		NodeName: nDB.config.NodeID,
 	}
 
 	raw, err := encodeMessage(MessageTypeNodeEvent, &nEvent)
@@ -129,11 +129,13 @@
 	tEvent := TableEvent{
 		Type:      event,
 		LTime:     entry.ltime,
-		NodeName:  nDB.config.NodeName,
+		NodeName:  nDB.config.NodeID,
 		NetworkID: nid,
 		TableName: tname,
 		Key:       key,
 		Value:     entry.value,
+		// The duration in second is a float that below would be truncated
+		ResidualReapTime: int32(entry.reapTime.Seconds()),
 	}
 
 	raw, err := encodeMessage(MessageTypeTableEvent, &tEvent)
@@ -143,7 +145,7 @@
 
 	var broadcastQ *memberlist.TransmitLimitedQueue
 	nDB.RLock()
-	thisNodeNetworks, ok := nDB.networks[nDB.config.NodeName]
+	thisNodeNetworks, ok := nDB.networks[nDB.config.NodeID]
 	if ok {
 		// The network may have been removed
 		network, networkOk := thisNodeNetworks[nid]
@@ -166,7 +168,7 @@
 		id:    nid,
 		tname: tname,
 		key:   key,
-		node:  nDB.config.NodeName,
+		node:  nDB.config.NodeID,
 	})
 	return nil
 }
diff --git a/vendor/github.com/docker/libnetwork/networkdb/cluster.go b/vendor/github.com/docker/libnetwork/networkdb/cluster.go
index d15a576..06a7aff 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/cluster.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/cluster.go
@@ -17,11 +17,15 @@
 )
 
 const (
-	reapInterval     = 30 * time.Minute
-	reapPeriod       = 5 * time.Second
-	retryInterval    = 1 * time.Second
-	nodeReapInterval = 24 * time.Hour
-	nodeReapPeriod   = 2 * time.Hour
+	// The garbage collection logic for entries leverage the presence of the network.
+	// For this reason the expiration time of the network is put slightly higher than the entry expiration so that
+	// there is at least 5 extra cycle to make sure that all the entries are properly deleted before deleting the network.
+	reapEntryInterval   = 30 * time.Minute
+	reapNetworkInterval = reapEntryInterval + 5*reapPeriod
+	reapPeriod          = 5 * time.Second
+	retryInterval       = 1 * time.Second
+	nodeReapInterval    = 24 * time.Hour
+	nodeReapPeriod      = 2 * time.Hour
 )
 
 type logWriter struct{}
@@ -102,7 +106,7 @@
 	nDB.lastHealthTimestamp = nDB.lastStatsTimestamp
 
 	config := memberlist.DefaultLANConfig()
-	config.Name = nDB.config.NodeName
+	config.Name = nDB.config.NodeID
 	config.BindAddr = nDB.config.BindAddr
 	config.AdvertiseAddr = nDB.config.AdvertiseAddr
 	config.UDPBufferSize = nDB.config.PacketBufferSize
@@ -300,8 +304,9 @@
 // the reaper runs. NOTE nDB.reapTableEntries updates the reapTime with a readlock. This
 // is safe as long as no other concurrent path touches the reapTime field.
 func (nDB *NetworkDB) reapState() {
-	nDB.reapNetworks()
+	// The reapTableEntries leverage the presence of the network so garbage collect entries first
 	nDB.reapTableEntries()
+	nDB.reapNetworks()
 }
 
 func (nDB *NetworkDB) reapNetworks() {
@@ -321,49 +326,57 @@
 }
 
 func (nDB *NetworkDB) reapTableEntries() {
-	var paths []string
-
+	var nodeNetworks []string
+	// This is best effort, if the list of network changes will be picked up in the next cycle
 	nDB.RLock()
-	nDB.indexes[byTable].Walk(func(path string, v interface{}) bool {
-		entry, ok := v.(*entry)
-		if !ok {
-			return false
-		}
-
-		if !entry.deleting {
-			return false
-		}
-		if entry.reapTime > 0 {
-			entry.reapTime -= reapPeriod
-			return false
-		}
-		paths = append(paths, path)
-		return false
-	})
+	for nid := range nDB.networks[nDB.config.NodeID] {
+		nodeNetworks = append(nodeNetworks, nid)
+	}
 	nDB.RUnlock()
 
-	nDB.Lock()
-	for _, path := range paths {
-		params := strings.Split(path[1:], "/")
-		tname := params[0]
-		nid := params[1]
-		key := params[2]
+	cycleStart := time.Now()
+	// In order to avoid blocking the database for a long time, apply the garbage collection logic by network
+	// The lock is taken at the beginning of the cycle and the deletion is inline
+	for _, nid := range nodeNetworks {
+		nDB.Lock()
+		nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid), func(path string, v interface{}) bool {
+			// timeCompensation compensate in case the lock took some time to be released
+			timeCompensation := time.Since(cycleStart)
+			entry, ok := v.(*entry)
+			if !ok || !entry.deleting {
+				return false
+			}
 
-		if _, ok := nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key)); !ok {
-			logrus.Errorf("Could not delete entry in table %s with network id %s and key %s as it does not exist", tname, nid, key)
-		}
+			// In this check we are adding an extra 1 second to guarantee that when the number is truncated to int32 to fit the packet
+			// for the tableEvent the number is always strictly > 1 and never 0
+			if entry.reapTime > reapPeriod+timeCompensation+time.Second {
+				entry.reapTime -= reapPeriod + timeCompensation
+				return false
+			}
 
-		if _, ok := nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key)); !ok {
-			logrus.Errorf("Could not delete entry in network %s with table name %s and key %s as it does not exist", nid, tname, key)
-		}
+			params := strings.Split(path[1:], "/")
+			nid := params[0]
+			tname := params[1]
+			key := params[2]
+
+			okTable, okNetwork := nDB.deleteEntry(nid, tname, key)
+			if !okTable {
+				logrus.Errorf("Table tree delete failed, entry with key:%s does not exists in the table:%s network:%s", key, tname, nid)
+			}
+			if !okNetwork {
+				logrus.Errorf("Network tree delete failed, entry with key:%s does not exists in the network:%s table:%s", key, nid, tname)
+			}
+
+			return false
+		})
+		nDB.Unlock()
 	}
-	nDB.Unlock()
 }
 
 func (nDB *NetworkDB) gossip() {
 	networkNodes := make(map[string][]string)
 	nDB.RLock()
-	thisNodeNetworks := nDB.networks[nDB.config.NodeName]
+	thisNodeNetworks := nDB.networks[nDB.config.NodeID]
 	for nid := range thisNodeNetworks {
 		networkNodes[nid] = nDB.networkNodes[nid]
 
@@ -375,7 +388,7 @@
 	if printHealth {
 		healthScore := nDB.memberlist.GetHealthScore()
 		if healthScore != 0 {
-			logrus.Warnf("NetworkDB stats - healthscore:%d (connectivity issues)", healthScore)
+			logrus.Warnf("NetworkDB stats %v(%v) - healthscore:%d (connectivity issues)", nDB.config.Hostname, nDB.config.NodeID, healthScore)
 		}
 		nDB.lastHealthTimestamp = time.Now()
 	}
@@ -406,8 +419,10 @@
 		// Collect stats and print the queue info, note this code is here also to have a view of the queues empty
 		network.qMessagesSent += len(msgs)
 		if printStats {
-			logrus.Infof("NetworkDB stats - Queue net:%s qLen:%d netPeers:%d netMsg/s:%d",
-				nid, broadcastQ.NumQueued(), broadcastQ.NumNodes(), network.qMessagesSent/int((nDB.config.StatsPrintPeriod/time.Second)))
+			logrus.Infof("NetworkDB stats %v(%v) - netID:%s leaving:%t netPeers:%d entries:%d Queue qLen:%d netMsg/s:%d",
+				nDB.config.Hostname, nDB.config.NodeID,
+				nid, network.leaving, broadcastQ.NumNodes(), network.entriesNumber, broadcastQ.NumQueued(),
+				network.qMessagesSent/int((nDB.config.StatsPrintPeriod/time.Second)))
 			network.qMessagesSent = 0
 		}
 
@@ -442,7 +457,7 @@
 func (nDB *NetworkDB) bulkSyncTables() {
 	var networks []string
 	nDB.RLock()
-	for nid, network := range nDB.networks[nDB.config.NodeName] {
+	for nid, network := range nDB.networks[nDB.config.NodeID] {
 		if network.leaving {
 			continue
 		}
@@ -508,10 +523,10 @@
 	var err error
 	var networks []string
 	for _, node := range nodes {
-		if node == nDB.config.NodeName {
+		if node == nDB.config.NodeID {
 			continue
 		}
-		logrus.Debugf("%s: Initiating bulk sync with node %v", nDB.config.NodeName, node)
+		logrus.Debugf("%v(%v): Initiating bulk sync with node %v", nDB.config.Hostname, nDB.config.NodeID, node)
 		networks = nDB.findCommonNetworks(node)
 		err = nDB.bulkSyncNode(networks, node, true)
 		// if its periodic bulksync stop after the first successful sync
@@ -542,7 +557,8 @@
 		unsolMsg = "unsolicited"
 	}
 
-	logrus.Debugf("%s: Initiating %s bulk sync for networks %v with node %s", nDB.config.NodeName, unsolMsg, networks, node)
+	logrus.Debugf("%v(%v): Initiating %s bulk sync for networks %v with node %s",
+		nDB.config.Hostname, nDB.config.NodeID, unsolMsg, networks, node)
 
 	nDB.RLock()
 	mnode := nDB.nodes[node]
@@ -572,6 +588,8 @@
 				TableName: params[1],
 				Key:       params[2],
 				Value:     entry.value,
+				// The duration in second is a float that below would be truncated
+				ResidualReapTime: int32(entry.reapTime.Seconds()),
 			}
 
 			msg, err := encodeMessage(MessageTypeTableEvent, &tEvent)
@@ -592,7 +610,7 @@
 	bsm := BulkSyncMessage{
 		LTime:       nDB.tableClock.Time(),
 		Unsolicited: unsolicited,
-		NodeName:    nDB.config.NodeName,
+		NodeName:    nDB.config.NodeID,
 		Networks:    networks,
 		Payload:     compound,
 	}
@@ -624,7 +642,7 @@
 		case <-t.C:
 			logrus.Errorf("Bulk sync to node %s timed out", node)
 		case <-ch:
-			logrus.Debugf("%s: Bulk sync to node %s took %s", nDB.config.NodeName, node, time.Since(startTime))
+			logrus.Debugf("%v(%v): Bulk sync to node %s took %s", nDB.config.Hostname, nDB.config.NodeID, node, time.Since(startTime))
 		}
 		t.Stop()
 	}
@@ -661,7 +679,7 @@
 		idx := randomOffset(n)
 		node := nodes[idx]
 
-		if node == nDB.config.NodeName {
+		if node == nDB.config.NodeID {
 			continue
 		}
 
diff --git a/vendor/github.com/docker/libnetwork/networkdb/delegate.go b/vendor/github.com/docker/libnetwork/networkdb/delegate.go
index ffaf94e..6553810 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/delegate.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/delegate.go
@@ -1,9 +1,8 @@
 package networkdb
 
 import (
-	"fmt"
 	"net"
-	"strings"
+	"time"
 
 	"github.com/gogo/protobuf/proto"
 	"github.com/sirupsen/logrus"
@@ -58,29 +57,6 @@
 	return nil
 }
 
-func (nDB *NetworkDB) purgeSameNode(n *node) {
-	nDB.Lock()
-	defer nDB.Unlock()
-
-	prefix := strings.Split(n.Name, "-")[0]
-	for _, nodes := range []map[string]*node{
-		nDB.failedNodes,
-		nDB.leftNodes,
-		nDB.nodes,
-	} {
-		var nodeNames []string
-		for name, node := range nodes {
-			if strings.HasPrefix(name, prefix) && n.Addr.Equal(node.Addr) {
-				nodeNames = append(nodeNames, name)
-			}
-		}
-
-		for _, name := range nodeNames {
-			delete(nodes, name)
-		}
-	}
-}
-
 func (nDB *NetworkDB) handleNodeEvent(nEvent *NodeEvent) bool {
 	// Update our local clock if the received messages has newer
 	// time.
@@ -108,7 +84,6 @@
 		return false
 	}
 
-	nDB.purgeSameNode(n)
 	n.ltime = nEvent.LTime
 
 	switch nEvent.Type {
@@ -140,7 +115,7 @@
 	nDB.Lock()
 	defer nDB.Unlock()
 
-	if nEvent.NodeName == nDB.config.NodeName {
+	if nEvent.NodeName == nDB.config.NodeID {
 		return false
 	}
 
@@ -165,7 +140,7 @@
 		n.ltime = nEvent.LTime
 		n.leaving = nEvent.Type == NetworkEventTypeLeave
 		if n.leaving {
-			n.reapTime = reapInterval
+			n.reapTime = reapNetworkInterval
 
 			// The remote node is leaving the network, but not the gossip cluster.
 			// Mark all its entries in deleted state, this will guarantee that
@@ -198,13 +173,12 @@
 }
 
 func (nDB *NetworkDB) handleTableEvent(tEvent *TableEvent) bool {
-	// Update our local clock if the received messages has newer
-	// time.
+	// Update our local clock if the received messages has newer time.
 	nDB.tableClock.Witness(tEvent.LTime)
 
 	// Ignore the table events for networks that are in the process of going away
 	nDB.RLock()
-	networks := nDB.networks[nDB.config.NodeName]
+	networks := nDB.networks[nDB.config.NodeID]
 	network, ok := networks[tEvent.NetworkID]
 	// Check if the owner of the event is still part of the network
 	nodes := nDB.networkNodes[tEvent.NetworkID]
@@ -235,20 +209,27 @@
 		node:     tEvent.NodeName,
 		value:    tEvent.Value,
 		deleting: tEvent.Type == TableEventTypeDelete,
+		reapTime: time.Duration(tEvent.ResidualReapTime) * time.Second,
 	}
 
-	if e.deleting {
-		e.reapTime = reapInterval
+	// All the entries marked for deletion should have a reapTime set greater than 0
+	// This case can happen if the cluster is running different versions of the engine where the old version does not have the
+	// field. If that is not the case, this can be a BUG
+	if e.deleting && e.reapTime == 0 {
+		logrus.Warnf("handleTableEvent object %+v has a 0 reapTime, is the cluster running the same docker engine version?", tEvent)
+		e.reapTime = reapEntryInterval
 	}
 
 	nDB.Lock()
-	nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tEvent.TableName, tEvent.NetworkID, tEvent.Key), e)
-	nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", tEvent.NetworkID, tEvent.TableName, tEvent.Key), e)
+	nDB.createOrUpdateEntry(tEvent.NetworkID, tEvent.TableName, tEvent.Key, e)
 	nDB.Unlock()
 
 	if err != nil && tEvent.Type == TableEventTypeDelete {
-		// If it is a delete event and we didn't have the entry here don't repropagate
-		return true
+		// If it is a delete event and we did not have a state for it, don't propagate to the application
+		// If the residual reapTime is lower or equal to 1/6 of the total reapTime don't bother broadcasting it around
+		// most likely the cluster is already aware of it, if not who will sync with this node will catch the state too.
+		// This also avoids that deletion of entries close to their garbage collection ends up circling around forever
+		return e.reapTime > reapEntryInterval/6
 	}
 
 	var op opType
@@ -287,7 +268,7 @@
 	}
 
 	// Ignore messages that this node generated.
-	if tEvent.NodeName == nDB.config.NodeName {
+	if tEvent.NodeName == nDB.config.NodeID {
 		return
 	}
 
@@ -300,25 +281,20 @@
 		}
 
 		nDB.RLock()
-		n, ok := nDB.networks[nDB.config.NodeName][tEvent.NetworkID]
+		n, ok := nDB.networks[nDB.config.NodeID][tEvent.NetworkID]
 		nDB.RUnlock()
 
-		if !ok {
+		// if the network is not there anymore, OR we are leaving the network OR the broadcast queue is not present
+		if !ok || n.leaving || n.tableBroadcasts == nil {
 			return
 		}
 
-		broadcastQ := n.tableBroadcasts
-
-		if broadcastQ == nil {
-			return
-		}
-
-		broadcastQ.QueueBroadcast(&tableEventMessage{
+		n.tableBroadcasts.QueueBroadcast(&tableEventMessage{
 			msg:   buf,
 			id:    tEvent.NetworkID,
 			tname: tEvent.TableName,
 			key:   tEvent.Key,
-			node:  nDB.config.NodeName,
+			node:  tEvent.NodeName,
 		})
 	}
 }
@@ -424,7 +400,7 @@
 	case MessageTypeCompound:
 		nDB.handleCompound(data, isBulkSync)
 	default:
-		logrus.Errorf("%s: unknown message type %d", nDB.config.NodeName, mType)
+		logrus.Errorf("%v(%v): unknown message type %d", nDB.config.Hostname, nDB.config.NodeID, mType)
 	}
 }
 
@@ -457,7 +433,7 @@
 
 	pp := NetworkPushPull{
 		LTime:    d.nDB.networkClock.Time(),
-		NodeName: d.nDB.config.NodeName,
+		NodeName: d.nDB.config.NodeID,
 	}
 
 	for name, nn := range d.nDB.networks {
diff --git a/vendor/github.com/docker/libnetwork/networkdb/networkdb.go b/vendor/github.com/docker/libnetwork/networkdb/networkdb.go
index 73dd999..45bd9cc 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/networkdb.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/networkdb.go
@@ -11,6 +11,7 @@
 	"time"
 
 	"github.com/armon/go-radix"
+	"github.com/docker/docker/pkg/stringid"
 	"github.com/docker/go-events"
 	"github.com/docker/libnetwork/types"
 	"github.com/hashicorp/memberlist"
@@ -141,13 +142,21 @@
 
 	// Number of gossip messages sent related to this network during the last stats collection period
 	qMessagesSent int
+
+	// Number of entries in the network. This value is the sum of all the entries of all the tables of a specific network.
+	// It is used for statistics purposes: it keeps track of the database size and is printed per network every StatsPrintPeriod
+	// interval
+	entriesNumber int
 }
 
 // Config represents the configuration of the networdb instance and
 // can be passed by the caller.
 type Config struct {
-	// NodeName is the cluster wide unique name for this node.
-	NodeName string
+	// NodeID is the unique identifier of the node when it is part of the cluster
+	NodeID string
+
+	// Hostname is the node hostname.
+	Hostname string
 
 	// BindAddr is the IP on which networkdb listens. It can be
 	// 0.0.0.0 to listen on all addresses on the host.
@@ -205,7 +214,8 @@
 func DefaultConfig() *Config {
 	hostname, _ := os.Hostname()
 	return &Config{
-		NodeName:          hostname,
+		NodeID:            stringid.TruncateID(stringid.GenerateRandomID()),
+		Hostname:          hostname,
 		BindAddr:          "0.0.0.0",
 		PacketBufferSize:  1400,
 		StatsPrintPeriod:  5 * time.Minute,
@@ -231,6 +241,7 @@
 	nDB.indexes[byTable] = radix.New()
 	nDB.indexes[byNetwork] = radix.New()
 
+	logrus.Debugf("New memberlist node - Node:%v will use memberlist nodeID:%v", c.Hostname, c.NodeID)
 	if err := nDB.clusterInit(); err != nil {
 		return nil, err
 	}
@@ -254,8 +265,11 @@
 // stopping timers, canceling goroutines etc.
 func (nDB *NetworkDB) Close() {
 	if err := nDB.clusterLeave(); err != nil {
-		logrus.Errorf("Could not close DB %s: %v", nDB.config.NodeName, err)
+		logrus.Errorf("%v(%v) Could not close DB: %v", nDB.config.Hostname, nDB.config.NodeID, err)
 	}
+
+	// Close the broadcaster to avoid leaking its (*Broadcaster).run goroutine
+	nDB.broadcaster.Close()
 }
 
 // ClusterPeers returns all the gossip cluster peers.
@@ -329,7 +343,7 @@
 
 	entry := &entry{
 		ltime: nDB.tableClock.Increment(),
-		node:  nDB.config.NodeName,
+		node:  nDB.config.NodeID,
 		value: value,
 	}
 
@@ -338,8 +352,7 @@
 	}
 
 	nDB.Lock()
-	nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
-	nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
+	nDB.createOrUpdateEntry(nid, tname, key, entry)
 	nDB.Unlock()
 
 	return nil
@@ -356,7 +369,7 @@
 
 	entry := &entry{
 		ltime: nDB.tableClock.Increment(),
-		node:  nDB.config.NodeName,
+		node:  nDB.config.NodeID,
 		value: value,
 	}
 
@@ -365,8 +378,7 @@
 	}
 
 	nDB.Lock()
-	nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
-	nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
+	nDB.createOrUpdateEntry(nid, tname, key, entry)
 	nDB.Unlock()
 
 	return nil
@@ -399,10 +411,10 @@
 
 	entry := &entry{
 		ltime:    nDB.tableClock.Increment(),
-		node:     nDB.config.NodeName,
+		node:     nDB.config.NodeID,
 		value:    value,
 		deleting: true,
-		reapTime: reapInterval,
+		reapTime: reapEntryInterval,
 	}
 
 	if err := nDB.sendTableEvent(TableEventTypeDelete, nid, tname, key, entry); err != nil {
@@ -410,8 +422,7 @@
 	}
 
 	nDB.Lock()
-	nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
-	nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
+	nDB.createOrUpdateEntry(nid, tname, key, entry)
 	nDB.Unlock()
 
 	return nil
@@ -449,7 +460,7 @@
 //		  entries owned by remote nodes, we will accept them and we notify the application
 func (nDB *NetworkDB) deleteNodeNetworkEntries(nid, node string) {
 	// Indicates if the delete is triggered for the local node
-	isNodeLocal := node == nDB.config.NodeName
+	isNodeLocal := node == nDB.config.NodeID
 
 	nDB.indexes[byNetwork].WalkPrefix(fmt.Sprintf("/%s", nid),
 		func(path string, v interface{}) bool {
@@ -473,10 +484,10 @@
 
 			entry := &entry{
 				ltime:    oldEntry.ltime,
-				node:     node,
+				node:     oldEntry.node,
 				value:    oldEntry.value,
 				deleting: true,
-				reapTime: reapInterval,
+				reapTime: reapEntryInterval,
 			}
 
 			// we arrived at this point in 2 cases:
@@ -488,15 +499,19 @@
 					// without doing a delete of all the objects
 					entry.ltime++
 				}
-				nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
-				nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
+
+				if !oldEntry.deleting {
+					nDB.createOrUpdateEntry(nid, tname, key, entry)
+				}
 			} else {
 				// the local node is leaving the network, all the entries of remote nodes can be safely removed
-				nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key))
-				nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key))
+				nDB.deleteEntry(nid, tname, key)
 			}
 
-			nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, entry.value))
+			// Notify to the upper layer only entries not already marked for deletion
+			if !oldEntry.deleting {
+				nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, entry.value))
+			}
 			return false
 		})
 }
@@ -513,8 +528,7 @@
 		nid := params[1]
 		key := params[2]
 
-		nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key))
-		nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key))
+		nDB.deleteEntry(nid, tname, key)
 
 		nDB.broadcaster.Write(makeEvent(opDelete, tname, nid, key, oldEntry.value))
 		return false
@@ -553,12 +567,17 @@
 	ltime := nDB.networkClock.Increment()
 
 	nDB.Lock()
-	nodeNetworks, ok := nDB.networks[nDB.config.NodeName]
+	nodeNetworks, ok := nDB.networks[nDB.config.NodeID]
 	if !ok {
 		nodeNetworks = make(map[string]*network)
-		nDB.networks[nDB.config.NodeName] = nodeNetworks
+		nDB.networks[nDB.config.NodeID] = nodeNetworks
 	}
-	nodeNetworks[nid] = &network{id: nid, ltime: ltime}
+	n, ok := nodeNetworks[nid]
+	var entries int
+	if ok {
+		entries = n.entriesNumber
+	}
+	nodeNetworks[nid] = &network{id: nid, ltime: ltime, entriesNumber: entries}
 	nodeNetworks[nid].tableBroadcasts = &memberlist.TransmitLimitedQueue{
 		NumNodes: func() int {
 			nDB.RLock()
@@ -567,7 +586,7 @@
 		},
 		RetransmitMult: 4,
 	}
-	nDB.addNetworkNode(nid, nDB.config.NodeName)
+	nDB.addNetworkNode(nid, nDB.config.NodeID)
 	networkNodes := nDB.networkNodes[nid]
 	nDB.Unlock()
 
@@ -575,7 +594,7 @@
 		return fmt.Errorf("failed to send leave network event for %s: %v", nid, err)
 	}
 
-	logrus.Debugf("%s: joined network %s", nDB.config.NodeName, nid)
+	logrus.Debugf("%v(%v): joined network %s", nDB.config.Hostname, nDB.config.NodeID, nid)
 	if _, err := nDB.bulkSync(networkNodes, true); err != nil {
 		logrus.Errorf("Error bulk syncing while joining network %s: %v", nid, err)
 	}
@@ -599,12 +618,12 @@
 	defer nDB.Unlock()
 
 	// Remove myself from the list of the nodes participating to the network
-	nDB.deleteNetworkNode(nid, nDB.config.NodeName)
+	nDB.deleteNetworkNode(nid, nDB.config.NodeID)
 
 	// Update all the local entries marking them for deletion and delete all the remote entries
-	nDB.deleteNodeNetworkEntries(nid, nDB.config.NodeName)
+	nDB.deleteNodeNetworkEntries(nid, nDB.config.NodeID)
 
-	nodeNetworks, ok := nDB.networks[nDB.config.NodeName]
+	nodeNetworks, ok := nDB.networks[nDB.config.NodeID]
 	if !ok {
 		return fmt.Errorf("could not find self node for network %s while trying to leave", nid)
 	}
@@ -614,8 +633,9 @@
 		return fmt.Errorf("could not find network %s while trying to leave", nid)
 	}
 
+	logrus.Debugf("%v(%v): leaving network %s", nDB.config.Hostname, nDB.config.NodeID, nid)
 	n.ltime = ltime
-	n.reapTime = reapInterval
+	n.reapTime = reapNetworkInterval
 	n.leaving = true
 	return nil
 }
@@ -659,7 +679,7 @@
 	defer nDB.RUnlock()
 
 	var networks []string
-	for nid := range nDB.networks[nDB.config.NodeName] {
+	for nid := range nDB.networks[nDB.config.NodeID] {
 		if n, ok := nDB.networks[nodeName][nid]; ok {
 			if !n.leaving {
 				networks = append(networks, nid)
@@ -675,7 +695,37 @@
 	defer nDB.Unlock()
 
 	ltime := nDB.networkClock.Increment()
-	for _, n := range nDB.networks[nDB.config.NodeName] {
+	for _, n := range nDB.networks[nDB.config.NodeID] {
 		n.ltime = ltime
 	}
 }
+
+// createOrUpdateEntry handles the creation or update of entries in the local
+// tree store. It also keeps the network's entry count in sync (all tables are aggregated).
+func (nDB *NetworkDB) createOrUpdateEntry(nid, tname, key string, entry interface{}) (bool, bool) {
+	_, okTable := nDB.indexes[byTable].Insert(fmt.Sprintf("/%s/%s/%s", tname, nid, key), entry)
+	_, okNetwork := nDB.indexes[byNetwork].Insert(fmt.Sprintf("/%s/%s/%s", nid, tname, key), entry)
+	if !okNetwork {
+		// Add only if it is an insert not an update
+		n, ok := nDB.networks[nDB.config.NodeID][nid]
+		if ok {
+			n.entriesNumber++
+		}
+	}
+	return okTable, okNetwork
+}
+
+// deleteEntry handles the deletion of entries from the local tree store.
+// It also keeps the network's entry count in sync (all tables are aggregated).
+func (nDB *NetworkDB) deleteEntry(nid, tname, key string) (bool, bool) {
+	_, okTable := nDB.indexes[byTable].Delete(fmt.Sprintf("/%s/%s/%s", tname, nid, key))
+	_, okNetwork := nDB.indexes[byNetwork].Delete(fmt.Sprintf("/%s/%s/%s", nid, tname, key))
+	if okNetwork {
+		// Remove only if the delete is successful
+		n, ok := nDB.networks[nDB.config.NodeID][nid]
+		if ok {
+			n.entriesNumber--
+		}
+	}
+	return okTable, okNetwork
+}
diff --git a/vendor/github.com/docker/libnetwork/networkdb/networkdb.pb.go b/vendor/github.com/docker/libnetwork/networkdb/networkdb.pb.go
index dfbc713..7087a57 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/networkdb.pb.go
+++ b/vendor/github.com/docker/libnetwork/networkdb/networkdb.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: networkdb.proto
-// DO NOT EDIT!
 
 /*
 	Package networkdb is a generated protocol buffer package.
@@ -28,9 +27,6 @@
 import github_com_hashicorp_serf_serf "github.com/hashicorp/serf/serf"
 
 import strings "strings"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import sort "sort"
-import strconv "strconv"
 import reflect "reflect"
 
 import io "io"
@@ -42,7 +38,9 @@
 
 // This is a compile-time assertion to ensure that this generated file
 // is compatible with the proto package it is being compiled against.
-const _ = proto.GoGoProtoPackageIsVersion1
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
 
 // MessageType enum defines all the core message types that networkdb
 // uses to communicate to peers.
@@ -192,6 +190,20 @@
 func (*GossipMessage) ProtoMessage()               {}
 func (*GossipMessage) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{0} }
 
+func (m *GossipMessage) GetType() MessageType {
+	if m != nil {
+		return m.Type
+	}
+	return MessageTypeInvalid
+}
+
+func (m *GossipMessage) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
 // NodeEvent message payload definition.
 type NodeEvent struct {
 	Type NodeEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=networkdb.NodeEvent_Type" json:"type,omitempty"`
@@ -207,6 +219,20 @@
 func (*NodeEvent) ProtoMessage()               {}
 func (*NodeEvent) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{1} }
 
+func (m *NodeEvent) GetType() NodeEvent_Type {
+	if m != nil {
+		return m.Type
+	}
+	return NodeEventTypeInvalid
+}
+
+func (m *NodeEvent) GetNodeName() string {
+	if m != nil {
+		return m.NodeName
+	}
+	return ""
+}
+
 // NetworkEvent message payload definition.
 type NetworkEvent struct {
 	Type NetworkEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=networkdb.NetworkEvent_Type" json:"type,omitempty"`
@@ -224,6 +250,27 @@
 func (*NetworkEvent) ProtoMessage()               {}
 func (*NetworkEvent) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{2} }
 
+func (m *NetworkEvent) GetType() NetworkEvent_Type {
+	if m != nil {
+		return m.Type
+	}
+	return NetworkEventTypeInvalid
+}
+
+func (m *NetworkEvent) GetNodeName() string {
+	if m != nil {
+		return m.NodeName
+	}
+	return ""
+}
+
+func (m *NetworkEvent) GetNetworkID() string {
+	if m != nil {
+		return m.NetworkID
+	}
+	return ""
+}
+
 // NetworkEntry for push pull of networks.
 type NetworkEntry struct {
 	// ID of the network
@@ -241,6 +288,27 @@
 func (*NetworkEntry) ProtoMessage()               {}
 func (*NetworkEntry) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{3} }
 
+func (m *NetworkEntry) GetNetworkID() string {
+	if m != nil {
+		return m.NetworkID
+	}
+	return ""
+}
+
+func (m *NetworkEntry) GetNodeName() string {
+	if m != nil {
+		return m.NodeName
+	}
+	return ""
+}
+
+func (m *NetworkEntry) GetLeaving() bool {
+	if m != nil {
+		return m.Leaving
+	}
+	return false
+}
+
 // NetworkPushpull message payload definition.
 type NetworkPushPull struct {
 	// Lamport time when this push pull was initiated.
@@ -261,6 +329,13 @@
 	return nil
 }
 
+func (m *NetworkPushPull) GetNodeName() string {
+	if m != nil {
+		return m.NodeName
+	}
+	return ""
+}
+
 // TableEvent message payload definition.
 type TableEvent struct {
 	Type TableEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=networkdb.TableEvent_Type" json:"type,omitempty"`
@@ -276,12 +351,63 @@
 	Key string `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"`
 	// Entry value.
 	Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"`
+	// Residual reap time for the entry before getting deleted in seconds
+	ResidualReapTime int32 `protobuf:"varint,8,opt,name=residual_reap_time,json=residualReapTime,proto3" json:"residual_reap_time,omitempty"`
 }
 
 func (m *TableEvent) Reset()                    { *m = TableEvent{} }
 func (*TableEvent) ProtoMessage()               {}
 func (*TableEvent) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{5} }
 
+func (m *TableEvent) GetType() TableEvent_Type {
+	if m != nil {
+		return m.Type
+	}
+	return TableEventTypeInvalid
+}
+
+func (m *TableEvent) GetNodeName() string {
+	if m != nil {
+		return m.NodeName
+	}
+	return ""
+}
+
+func (m *TableEvent) GetNetworkID() string {
+	if m != nil {
+		return m.NetworkID
+	}
+	return ""
+}
+
+func (m *TableEvent) GetTableName() string {
+	if m != nil {
+		return m.TableName
+	}
+	return ""
+}
+
+func (m *TableEvent) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+func (m *TableEvent) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *TableEvent) GetResidualReapTime() int32 {
+	if m != nil {
+		return m.ResidualReapTime
+	}
+	return 0
+}
+
 // BulkSync message payload definition.
 type BulkSyncMessage struct {
 	// Lamport time when this bulk sync was initiated.
@@ -302,6 +428,34 @@
 func (*BulkSyncMessage) ProtoMessage()               {}
 func (*BulkSyncMessage) Descriptor() ([]byte, []int) { return fileDescriptorNetworkdb, []int{6} }
 
+func (m *BulkSyncMessage) GetUnsolicited() bool {
+	if m != nil {
+		return m.Unsolicited
+	}
+	return false
+}
+
+func (m *BulkSyncMessage) GetNodeName() string {
+	if m != nil {
+		return m.NodeName
+	}
+	return ""
+}
+
+func (m *BulkSyncMessage) GetNetworks() []string {
+	if m != nil {
+		return m.Networks
+	}
+	return nil
+}
+
+func (m *BulkSyncMessage) GetPayload() []byte {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
 // Compound message payload definition.
 type CompoundMessage struct {
 	// A list of simple messages.
@@ -322,7 +476,7 @@
 type CompoundMessage_SimpleMessage struct {
 	// Bytestring payload of a message constructed using
 	// other message type definitions.
-	Payload []byte `protobuf:"bytes,1,opt,name=Payload,json=payload,proto3" json:"Payload,omitempty"`
+	Payload []byte `protobuf:"bytes,1,opt,name=Payload,proto3" json:"Payload,omitempty"`
 }
 
 func (m *CompoundMessage_SimpleMessage) Reset()      { *m = CompoundMessage_SimpleMessage{} }
@@ -331,6 +485,13 @@
 	return fileDescriptorNetworkdb, []int{7, 0}
 }
 
+func (m *CompoundMessage_SimpleMessage) GetPayload() []byte {
+	if m != nil {
+		return m.Payload
+	}
+	return nil
+}
+
 func init() {
 	proto.RegisterType((*GossipMessage)(nil), "networkdb.GossipMessage")
 	proto.RegisterType((*NodeEvent)(nil), "networkdb.NodeEvent")
@@ -413,7 +574,7 @@
 	if this == nil {
 		return "nil"
 	}
-	s := make([]string, 0, 11)
+	s := make([]string, 0, 12)
 	s = append(s, "&networkdb.TableEvent{")
 	s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n")
 	s = append(s, "LTime: "+fmt.Sprintf("%#v", this.LTime)+",\n")
@@ -422,6 +583,7 @@
 	s = append(s, "TableName: "+fmt.Sprintf("%#v", this.TableName)+",\n")
 	s = append(s, "Key: "+fmt.Sprintf("%#v", this.Key)+",\n")
 	s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	s = append(s, "ResidualReapTime: "+fmt.Sprintf("%#v", this.ResidualReapTime)+",\n")
 	s = append(s, "}")
 	return strings.Join(s, "")
 }
@@ -469,197 +631,180 @@
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
 }
-func extensionToGoStringNetworkdb(e map[int32]github_com_gogo_protobuf_proto.Extension) string {
-	if e == nil {
-		return "nil"
-	}
-	s := "map[int32]proto.Extension{"
-	keys := make([]int, 0, len(e))
-	for k := range e {
-		keys = append(keys, int(k))
-	}
-	sort.Ints(keys)
-	ss := []string{}
-	for _, k := range keys {
-		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
-	}
-	s += strings.Join(ss, ",") + "}"
-	return s
-}
-func (m *GossipMessage) Marshal() (data []byte, err error) {
+func (m *GossipMessage) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *GossipMessage) MarshalTo(data []byte) (int, error) {
+func (m *GossipMessage) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if m.Type != 0 {
-		data[i] = 0x8
+		dAtA[i] = 0x8
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(m.Type))
+		i = encodeVarintNetworkdb(dAtA, i, uint64(m.Type))
 	}
 	if len(m.Data) > 0 {
-		data[i] = 0x12
+		dAtA[i] = 0x12
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.Data)))
-		i += copy(data[i:], m.Data)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.Data)))
+		i += copy(dAtA[i:], m.Data)
 	}
 	return i, nil
 }
 
-func (m *NodeEvent) Marshal() (data []byte, err error) {
+func (m *NodeEvent) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *NodeEvent) MarshalTo(data []byte) (int, error) {
+func (m *NodeEvent) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if m.Type != 0 {
-		data[i] = 0x8
+		dAtA[i] = 0x8
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(m.Type))
+		i = encodeVarintNetworkdb(dAtA, i, uint64(m.Type))
 	}
 	if m.LTime != 0 {
-		data[i] = 0x10
+		dAtA[i] = 0x10
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(m.LTime))
+		i = encodeVarintNetworkdb(dAtA, i, uint64(m.LTime))
 	}
 	if len(m.NodeName) > 0 {
-		data[i] = 0x1a
+		dAtA[i] = 0x1a
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.NodeName)))
-		i += copy(data[i:], m.NodeName)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NodeName)))
+		i += copy(dAtA[i:], m.NodeName)
 	}
 	return i, nil
 }
 
-func (m *NetworkEvent) Marshal() (data []byte, err error) {
+func (m *NetworkEvent) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *NetworkEvent) MarshalTo(data []byte) (int, error) {
+func (m *NetworkEvent) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if m.Type != 0 {
-		data[i] = 0x8
+		dAtA[i] = 0x8
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(m.Type))
+		i = encodeVarintNetworkdb(dAtA, i, uint64(m.Type))
 	}
 	if m.LTime != 0 {
-		data[i] = 0x10
+		dAtA[i] = 0x10
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(m.LTime))
+		i = encodeVarintNetworkdb(dAtA, i, uint64(m.LTime))
 	}
 	if len(m.NodeName) > 0 {
-		data[i] = 0x1a
+		dAtA[i] = 0x1a
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.NodeName)))
-		i += copy(data[i:], m.NodeName)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NodeName)))
+		i += copy(dAtA[i:], m.NodeName)
 	}
 	if len(m.NetworkID) > 0 {
-		data[i] = 0x22
+		dAtA[i] = 0x22
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.NetworkID)))
-		i += copy(data[i:], m.NetworkID)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NetworkID)))
+		i += copy(dAtA[i:], m.NetworkID)
 	}
 	return i, nil
 }
 
-func (m *NetworkEntry) Marshal() (data []byte, err error) {
+func (m *NetworkEntry) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *NetworkEntry) MarshalTo(data []byte) (int, error) {
+func (m *NetworkEntry) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if len(m.NetworkID) > 0 {
-		data[i] = 0xa
+		dAtA[i] = 0xa
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.NetworkID)))
-		i += copy(data[i:], m.NetworkID)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NetworkID)))
+		i += copy(dAtA[i:], m.NetworkID)
 	}
 	if m.LTime != 0 {
-		data[i] = 0x10
+		dAtA[i] = 0x10
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(m.LTime))
+		i = encodeVarintNetworkdb(dAtA, i, uint64(m.LTime))
 	}
 	if len(m.NodeName) > 0 {
-		data[i] = 0x1a
+		dAtA[i] = 0x1a
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.NodeName)))
-		i += copy(data[i:], m.NodeName)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NodeName)))
+		i += copy(dAtA[i:], m.NodeName)
 	}
 	if m.Leaving {
-		data[i] = 0x20
+		dAtA[i] = 0x20
 		i++
 		if m.Leaving {
-			data[i] = 1
+			dAtA[i] = 1
 		} else {
-			data[i] = 0
+			dAtA[i] = 0
 		}
 		i++
 	}
 	return i, nil
 }
 
-func (m *NetworkPushPull) Marshal() (data []byte, err error) {
+func (m *NetworkPushPull) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *NetworkPushPull) MarshalTo(data []byte) (int, error) {
+func (m *NetworkPushPull) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if m.LTime != 0 {
-		data[i] = 0x8
+		dAtA[i] = 0x8
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(m.LTime))
+		i = encodeVarintNetworkdb(dAtA, i, uint64(m.LTime))
 	}
 	if len(m.Networks) > 0 {
 		for _, msg := range m.Networks {
-			data[i] = 0x12
+			dAtA[i] = 0x12
 			i++
-			i = encodeVarintNetworkdb(data, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(data[i:])
+			i = encodeVarintNetworkdb(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
 			if err != nil {
 				return 0, err
 			}
@@ -667,153 +812,158 @@
 		}
 	}
 	if len(m.NodeName) > 0 {
-		data[i] = 0x1a
+		dAtA[i] = 0x1a
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.NodeName)))
-		i += copy(data[i:], m.NodeName)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NodeName)))
+		i += copy(dAtA[i:], m.NodeName)
 	}
 	return i, nil
 }
 
-func (m *TableEvent) Marshal() (data []byte, err error) {
+func (m *TableEvent) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *TableEvent) MarshalTo(data []byte) (int, error) {
+func (m *TableEvent) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if m.Type != 0 {
-		data[i] = 0x8
+		dAtA[i] = 0x8
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(m.Type))
+		i = encodeVarintNetworkdb(dAtA, i, uint64(m.Type))
 	}
 	if m.LTime != 0 {
-		data[i] = 0x10
+		dAtA[i] = 0x10
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(m.LTime))
+		i = encodeVarintNetworkdb(dAtA, i, uint64(m.LTime))
 	}
 	if len(m.NodeName) > 0 {
-		data[i] = 0x1a
+		dAtA[i] = 0x1a
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.NodeName)))
-		i += copy(data[i:], m.NodeName)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NodeName)))
+		i += copy(dAtA[i:], m.NodeName)
 	}
 	if len(m.NetworkID) > 0 {
-		data[i] = 0x22
+		dAtA[i] = 0x22
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.NetworkID)))
-		i += copy(data[i:], m.NetworkID)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NetworkID)))
+		i += copy(dAtA[i:], m.NetworkID)
 	}
 	if len(m.TableName) > 0 {
-		data[i] = 0x2a
+		dAtA[i] = 0x2a
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.TableName)))
-		i += copy(data[i:], m.TableName)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.TableName)))
+		i += copy(dAtA[i:], m.TableName)
 	}
 	if len(m.Key) > 0 {
-		data[i] = 0x32
+		dAtA[i] = 0x32
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.Key)))
-		i += copy(data[i:], m.Key)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.Key)))
+		i += copy(dAtA[i:], m.Key)
 	}
 	if len(m.Value) > 0 {
-		data[i] = 0x3a
+		dAtA[i] = 0x3a
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.Value)))
-		i += copy(data[i:], m.Value)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.Value)))
+		i += copy(dAtA[i:], m.Value)
+	}
+	if m.ResidualReapTime != 0 {
+		dAtA[i] = 0x40
+		i++
+		i = encodeVarintNetworkdb(dAtA, i, uint64(m.ResidualReapTime))
 	}
 	return i, nil
 }
 
-func (m *BulkSyncMessage) Marshal() (data []byte, err error) {
+func (m *BulkSyncMessage) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *BulkSyncMessage) MarshalTo(data []byte) (int, error) {
+func (m *BulkSyncMessage) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if m.LTime != 0 {
-		data[i] = 0x8
+		dAtA[i] = 0x8
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(m.LTime))
+		i = encodeVarintNetworkdb(dAtA, i, uint64(m.LTime))
 	}
 	if m.Unsolicited {
-		data[i] = 0x10
+		dAtA[i] = 0x10
 		i++
 		if m.Unsolicited {
-			data[i] = 1
+			dAtA[i] = 1
 		} else {
-			data[i] = 0
+			dAtA[i] = 0
 		}
 		i++
 	}
 	if len(m.NodeName) > 0 {
-		data[i] = 0x1a
+		dAtA[i] = 0x1a
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.NodeName)))
-		i += copy(data[i:], m.NodeName)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.NodeName)))
+		i += copy(dAtA[i:], m.NodeName)
 	}
 	if len(m.Networks) > 0 {
 		for _, s := range m.Networks {
-			data[i] = 0x22
+			dAtA[i] = 0x22
 			i++
 			l = len(s)
 			for l >= 1<<7 {
-				data[i] = uint8(uint64(l)&0x7f | 0x80)
+				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
 				l >>= 7
 				i++
 			}
-			data[i] = uint8(l)
+			dAtA[i] = uint8(l)
 			i++
-			i += copy(data[i:], s)
+			i += copy(dAtA[i:], s)
 		}
 	}
 	if len(m.Payload) > 0 {
-		data[i] = 0x2a
+		dAtA[i] = 0x2a
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.Payload)))
-		i += copy(data[i:], m.Payload)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.Payload)))
+		i += copy(dAtA[i:], m.Payload)
 	}
 	return i, nil
 }
 
-func (m *CompoundMessage) Marshal() (data []byte, err error) {
+func (m *CompoundMessage) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *CompoundMessage) MarshalTo(data []byte) (int, error) {
+func (m *CompoundMessage) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if len(m.Messages) > 0 {
 		for _, msg := range m.Messages {
-			data[i] = 0xa
+			dAtA[i] = 0xa
 			i++
-			i = encodeVarintNetworkdb(data, i, uint64(msg.Size()))
-			n, err := msg.MarshalTo(data[i:])
+			i = encodeVarintNetworkdb(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
 			if err != nil {
 				return 0, err
 			}
@@ -823,55 +973,55 @@
 	return i, nil
 }
 
-func (m *CompoundMessage_SimpleMessage) Marshal() (data []byte, err error) {
+func (m *CompoundMessage_SimpleMessage) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
-	data = make([]byte, size)
-	n, err := m.MarshalTo(data)
+	dAtA = make([]byte, size)
+	n, err := m.MarshalTo(dAtA)
 	if err != nil {
 		return nil, err
 	}
-	return data[:n], nil
+	return dAtA[:n], nil
 }
 
-func (m *CompoundMessage_SimpleMessage) MarshalTo(data []byte) (int, error) {
+func (m *CompoundMessage_SimpleMessage) MarshalTo(dAtA []byte) (int, error) {
 	var i int
 	_ = i
 	var l int
 	_ = l
 	if len(m.Payload) > 0 {
-		data[i] = 0xa
+		dAtA[i] = 0xa
 		i++
-		i = encodeVarintNetworkdb(data, i, uint64(len(m.Payload)))
-		i += copy(data[i:], m.Payload)
+		i = encodeVarintNetworkdb(dAtA, i, uint64(len(m.Payload)))
+		i += copy(dAtA[i:], m.Payload)
 	}
 	return i, nil
 }
 
-func encodeFixed64Networkdb(data []byte, offset int, v uint64) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
-	data[offset+4] = uint8(v >> 32)
-	data[offset+5] = uint8(v >> 40)
-	data[offset+6] = uint8(v >> 48)
-	data[offset+7] = uint8(v >> 56)
+func encodeFixed64Networkdb(dAtA []byte, offset int, v uint64) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
+	dAtA[offset+4] = uint8(v >> 32)
+	dAtA[offset+5] = uint8(v >> 40)
+	dAtA[offset+6] = uint8(v >> 48)
+	dAtA[offset+7] = uint8(v >> 56)
 	return offset + 8
 }
-func encodeFixed32Networkdb(data []byte, offset int, v uint32) int {
-	data[offset] = uint8(v)
-	data[offset+1] = uint8(v >> 8)
-	data[offset+2] = uint8(v >> 16)
-	data[offset+3] = uint8(v >> 24)
+func encodeFixed32Networkdb(dAtA []byte, offset int, v uint32) int {
+	dAtA[offset] = uint8(v)
+	dAtA[offset+1] = uint8(v >> 8)
+	dAtA[offset+2] = uint8(v >> 16)
+	dAtA[offset+3] = uint8(v >> 24)
 	return offset + 4
 }
-func encodeVarintNetworkdb(data []byte, offset int, v uint64) int {
+func encodeVarintNetworkdb(dAtA []byte, offset int, v uint64) int {
 	for v >= 1<<7 {
-		data[offset] = uint8(v&0x7f | 0x80)
+		dAtA[offset] = uint8(v&0x7f | 0x80)
 		v >>= 7
 		offset++
 	}
-	data[offset] = uint8(v)
+	dAtA[offset] = uint8(v)
 	return offset + 1
 }
 func (m *GossipMessage) Size() (n int) {
@@ -991,6 +1141,9 @@
 	if l > 0 {
 		n += 1 + l + sovNetworkdb(uint64(l))
 	}
+	if m.ResidualReapTime != 0 {
+		n += 1 + sovNetworkdb(uint64(m.ResidualReapTime))
+	}
 	return n
 }
 
@@ -1128,6 +1281,7 @@
 		`TableName:` + fmt.Sprintf("%v", this.TableName) + `,`,
 		`Key:` + fmt.Sprintf("%v", this.Key) + `,`,
 		`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
+		`ResidualReapTime:` + fmt.Sprintf("%v", this.ResidualReapTime) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1174,8 +1328,8 @@
 	pv := reflect.Indirect(rv).Interface()
 	return fmt.Sprintf("*%v", pv)
 }
-func (m *GossipMessage) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *GossipMessage) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -1187,7 +1341,7 @@
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -1215,7 +1369,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				m.Type |= (MessageType(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1234,7 +1388,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1248,14 +1402,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Data = append(m.Data[:0], data[iNdEx:postIndex]...)
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
 			if m.Data == nil {
 				m.Data = []byte{}
 			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipNetworkdb(data[iNdEx:])
+			skippy, err := skipNetworkdb(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -1274,8 +1428,8 @@
 	}
 	return nil
 }
-func (m *NodeEvent) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *NodeEvent) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -1287,7 +1441,7 @@
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -1315,7 +1469,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				m.Type |= (NodeEvent_Type(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1334,7 +1488,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1353,7 +1507,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1368,11 +1522,11 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.NodeName = string(data[iNdEx:postIndex])
+			m.NodeName = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipNetworkdb(data[iNdEx:])
+			skippy, err := skipNetworkdb(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -1391,8 +1545,8 @@
 	}
 	return nil
 }
-func (m *NetworkEvent) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *NetworkEvent) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -1404,7 +1558,7 @@
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -1432,7 +1586,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				m.Type |= (NetworkEvent_Type(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1451,7 +1605,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1470,7 +1624,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1485,7 +1639,7 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.NodeName = string(data[iNdEx:postIndex])
+			m.NodeName = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 4:
 			if wireType != 2 {
@@ -1499,7 +1653,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1514,11 +1668,11 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.NetworkID = string(data[iNdEx:postIndex])
+			m.NetworkID = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipNetworkdb(data[iNdEx:])
+			skippy, err := skipNetworkdb(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -1537,8 +1691,8 @@
 	}
 	return nil
 }
-func (m *NetworkEntry) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *NetworkEntry) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -1550,7 +1704,7 @@
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -1578,7 +1732,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1593,7 +1747,7 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.NetworkID = string(data[iNdEx:postIndex])
+			m.NetworkID = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 2:
 			if wireType != 0 {
@@ -1607,7 +1761,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1626,7 +1780,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1641,7 +1795,7 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.NodeName = string(data[iNdEx:postIndex])
+			m.NodeName = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 4:
 			if wireType != 0 {
@@ -1655,7 +1809,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				v |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1665,7 +1819,7 @@
 			m.Leaving = bool(v != 0)
 		default:
 			iNdEx = preIndex
-			skippy, err := skipNetworkdb(data[iNdEx:])
+			skippy, err := skipNetworkdb(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -1684,8 +1838,8 @@
 	}
 	return nil
 }
-func (m *NetworkPushPull) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *NetworkPushPull) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -1697,7 +1851,7 @@
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -1725,7 +1879,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1744,7 +1898,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1759,7 +1913,7 @@
 				return io.ErrUnexpectedEOF
 			}
 			m.Networks = append(m.Networks, &NetworkEntry{})
-			if err := m.Networks[len(m.Networks)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Networks[len(m.Networks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
@@ -1775,7 +1929,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1790,11 +1944,11 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.NodeName = string(data[iNdEx:postIndex])
+			m.NodeName = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipNetworkdb(data[iNdEx:])
+			skippy, err := skipNetworkdb(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -1813,8 +1967,8 @@
 	}
 	return nil
 }
-func (m *TableEvent) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *TableEvent) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -1826,7 +1980,7 @@
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -1854,7 +2008,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				m.Type |= (TableEvent_Type(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1873,7 +2027,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1892,7 +2046,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1907,7 +2061,7 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.NodeName = string(data[iNdEx:postIndex])
+			m.NodeName = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 4:
 			if wireType != 2 {
@@ -1921,7 +2075,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1936,7 +2090,7 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.NetworkID = string(data[iNdEx:postIndex])
+			m.NetworkID = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 5:
 			if wireType != 2 {
@@ -1950,7 +2104,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1965,7 +2119,7 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.TableName = string(data[iNdEx:postIndex])
+			m.TableName = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 6:
 			if wireType != 2 {
@@ -1979,7 +2133,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -1994,7 +2148,7 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Key = string(data[iNdEx:postIndex])
+			m.Key = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 7:
 			if wireType != 2 {
@@ -2008,7 +2162,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -2022,14 +2176,33 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Value = append(m.Value[:0], data[iNdEx:postIndex]...)
+			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
 			if m.Value == nil {
 				m.Value = []byte{}
 			}
 			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResidualReapTime", wireType)
+			}
+			m.ResidualReapTime = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowNetworkdb
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ResidualReapTime |= (int32(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
 		default:
 			iNdEx = preIndex
-			skippy, err := skipNetworkdb(data[iNdEx:])
+			skippy, err := skipNetworkdb(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -2048,8 +2221,8 @@
 	}
 	return nil
 }
-func (m *BulkSyncMessage) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *BulkSyncMessage) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -2061,7 +2234,7 @@
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -2089,7 +2262,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				m.LTime |= (github_com_hashicorp_serf_serf.LamportTime(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -2108,7 +2281,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				v |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -2128,7 +2301,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -2143,7 +2316,7 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.NodeName = string(data[iNdEx:postIndex])
+			m.NodeName = string(dAtA[iNdEx:postIndex])
 			iNdEx = postIndex
 		case 4:
 			if wireType != 2 {
@@ -2157,7 +2330,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				stringLen |= (uint64(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -2172,7 +2345,7 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Networks = append(m.Networks, string(data[iNdEx:postIndex]))
+			m.Networks = append(m.Networks, string(dAtA[iNdEx:postIndex]))
 			iNdEx = postIndex
 		case 5:
 			if wireType != 2 {
@@ -2186,7 +2359,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -2200,14 +2373,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Payload = append(m.Payload[:0], data[iNdEx:postIndex]...)
+			m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...)
 			if m.Payload == nil {
 				m.Payload = []byte{}
 			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipNetworkdb(data[iNdEx:])
+			skippy, err := skipNetworkdb(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -2226,8 +2399,8 @@
 	}
 	return nil
 }
-func (m *CompoundMessage) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *CompoundMessage) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -2239,7 +2412,7 @@
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -2267,7 +2440,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -2282,13 +2455,13 @@
 				return io.ErrUnexpectedEOF
 			}
 			m.Messages = append(m.Messages, &CompoundMessage_SimpleMessage{})
-			if err := m.Messages[len(m.Messages)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {
+			if err := m.Messages[len(m.Messages)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
 				return err
 			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipNetworkdb(data[iNdEx:])
+			skippy, err := skipNetworkdb(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -2307,8 +2480,8 @@
 	}
 	return nil
 }
-func (m *CompoundMessage_SimpleMessage) Unmarshal(data []byte) error {
-	l := len(data)
+func (m *CompoundMessage_SimpleMessage) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		preIndex := iNdEx
@@ -2320,7 +2493,7 @@
 			if iNdEx >= l {
 				return io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -2348,7 +2521,7 @@
 				if iNdEx >= l {
 					return io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				byteLen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -2362,14 +2535,14 @@
 			if postIndex > l {
 				return io.ErrUnexpectedEOF
 			}
-			m.Payload = append(m.Payload[:0], data[iNdEx:postIndex]...)
+			m.Payload = append(m.Payload[:0], dAtA[iNdEx:postIndex]...)
 			if m.Payload == nil {
 				m.Payload = []byte{}
 			}
 			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
-			skippy, err := skipNetworkdb(data[iNdEx:])
+			skippy, err := skipNetworkdb(dAtA[iNdEx:])
 			if err != nil {
 				return err
 			}
@@ -2388,8 +2561,8 @@
 	}
 	return nil
 }
-func skipNetworkdb(data []byte) (n int, err error) {
-	l := len(data)
+func skipNetworkdb(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
 	iNdEx := 0
 	for iNdEx < l {
 		var wire uint64
@@ -2400,7 +2573,7 @@
 			if iNdEx >= l {
 				return 0, io.ErrUnexpectedEOF
 			}
-			b := data[iNdEx]
+			b := dAtA[iNdEx]
 			iNdEx++
 			wire |= (uint64(b) & 0x7F) << shift
 			if b < 0x80 {
@@ -2418,7 +2591,7 @@
 					return 0, io.ErrUnexpectedEOF
 				}
 				iNdEx++
-				if data[iNdEx-1] < 0x80 {
+				if dAtA[iNdEx-1] < 0x80 {
 					break
 				}
 			}
@@ -2435,7 +2608,7 @@
 				if iNdEx >= l {
 					return 0, io.ErrUnexpectedEOF
 				}
-				b := data[iNdEx]
+				b := dAtA[iNdEx]
 				iNdEx++
 				length |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
@@ -2458,7 +2631,7 @@
 					if iNdEx >= l {
 						return 0, io.ErrUnexpectedEOF
 					}
-					b := data[iNdEx]
+					b := dAtA[iNdEx]
 					iNdEx++
 					innerWire |= (uint64(b) & 0x7F) << shift
 					if b < 0x80 {
@@ -2469,7 +2642,7 @@
 				if innerWireType == 4 {
 					break
 				}
-				next, err := skipNetworkdb(data[start:])
+				next, err := skipNetworkdb(dAtA[start:])
 				if err != nil {
 					return 0, err
 				}
@@ -2493,62 +2666,68 @@
 	ErrIntOverflowNetworkdb   = fmt.Errorf("proto: integer overflow")
 )
 
+func init() { proto.RegisterFile("networkdb.proto", fileDescriptorNetworkdb) }
+
 var fileDescriptorNetworkdb = []byte{
-	// 887 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x96, 0xc1, 0x6e, 0xe3, 0x44,
-	0x18, 0xc7, 0xeb, 0xc4, 0x49, 0xe3, 0xaf, 0x0d, 0x1b, 0xbc, 0xdd, 0xad, 0xd7, 0x0b, 0x49, 0x31,
-	0xcb, 0x2a, 0x44, 0xe0, 0xa2, 0xee, 0x13, 0x24, 0xb1, 0x05, 0xd9, 0xf5, 0x3a, 0x91, 0x93, 0x14,
-	0x71, 0x8a, 0x9c, 0x78, 0x48, 0xac, 0x3a, 0xb6, 0x15, 0x3b, 0x45, 0x39, 0x81, 0x38, 0xad, 0x78,
-	0x07, 0x4e, 0xcb, 0x99, 0x07, 0xe0, 0xc0, 0x89, 0xc3, 0x8a, 0x13, 0xdc, 0x10, 0x87, 0x8a, 0xee,
-	0x13, 0xf0, 0x08, 0x8c, 0xc7, 0x76, 0x32, 0x4e, 0xa3, 0x5e, 0x40, 0xc0, 0xc1, 0xad, 0x67, 0xe6,
-	0xe7, 0xcf, 0xdf, 0xf7, 0x9f, 0xff, 0xe7, 0x09, 0xdc, 0x71, 0x51, 0xf8, 0x85, 0xb7, 0xb8, 0xb0,
-	0xc6, 0xb2, 0xbf, 0xf0, 0x42, 0x8f, 0xe7, 0xd6, 0x13, 0xe2, 0xd1, 0xd4, 0x9b, 0x7a, 0x64, 0xf6,
-	0x34, 0xba, 0x8b, 0x01, 0xa9, 0x0b, 0xe5, 0x8f, 0xbd, 0x20, 0xb0, 0xfd, 0xe7, 0x28, 0x08, 0xcc,
-	0x29, 0xe2, 0x1b, 0xc0, 0x86, 0x2b, 0x1f, 0x09, 0xcc, 0x09, 0x53, 0x7f, 0xe3, 0xec, 0xbe, 0xbc,
-	0x89, 0x98, 0x10, 0x03, 0xbc, 0x6a, 0x10, 0x86, 0xe7, 0x81, 0xb5, 0xcc, 0xd0, 0x14, 0x72, 0x98,
-	0x3d, 0x34, 0xc8, 0xbd, 0xf4, 0x32, 0x07, 0x9c, 0xee, 0x59, 0x48, 0xbd, 0x44, 0x6e, 0xc8, 0x7f,
-	0x98, 0x89, 0xf6, 0x80, 0x8a, 0xb6, 0x66, 0x64, 0x2a, 0x60, 0x07, 0x8a, 0xce, 0x28, 0xb4, 0xe7,
-	0x88, 0x84, 0x64, 0x5b, 0x67, 0xaf, 0xae, 0x6a, 0x7b, 0xbf, 0x5f, 0xd5, 0x1a, 0x53, 0x3b, 0x9c,
-	0x2d, 0xc7, 0xf2, 0xc4, 0x9b, 0x9f, 0xce, 0xcc, 0x60, 0x66, 0x4f, 0xbc, 0x85, 0x7f, 0x1a, 0xa0,
-	0xc5, 0xe7, 0xe4, 0x8f, 0xac, 0x99, 0x73, 0xdf, 0x5b, 0x84, 0x03, 0xfc, 0xa4, 0x51, 0x70, 0xa2,
-	0x7f, 0xfc, 0x43, 0xe0, 0x5c, 0xfc, 0x8a, 0x91, 0x6b, 0xe2, 0x68, 0x79, 0x1c, 0x8d, 0x33, 0x4a,
-	0xd1, 0x84, 0x8e, 0xc7, 0xd2, 0x97, 0xc0, 0x46, 0x6f, 0xe5, 0xdf, 0x83, 0xfd, 0x8e, 0x7e, 0xde,
-	0xd4, 0x3a, 0x4a, 0x65, 0x4f, 0x14, 0xbe, 0xf9, 0xf6, 0xe4, 0x68, 0x9d, 0x56, 0xb4, 0xde, 0x71,
-	0x2f, 0x4d, 0xc7, 0xb6, 0xf8, 0x1a, 0xb0, 0x4f, 0xbb, 0x1d, 0xbd, 0xc2, 0x88, 0xf7, 0x30, 0xf3,
-	0x66, 0x86, 0x79, 0xea, 0xd9, 0x2e, 0xff, 0x0e, 0x14, 0x34, 0xb5, 0x79, 0xae, 0x56, 0x72, 0xe2,
-	0x7d, 0x4c, 0xf0, 0x19, 0x42, 0x43, 0xe6, 0x25, 0x12, 0x0f, 0x5f, 0xbc, 0xac, 0xee, 0xfd, 0xf0,
-	0x5d, 0x95, 0xbc, 0x58, 0xba, 0xce, 0xc1, 0xa1, 0x1e, 0x6b, 0x11, 0x0b, 0xf5, 0x51, 0x46, 0xa8,
-	0xb7, 0x68, 0xa1, 0x28, 0xec, 0x3f, 0xd0, 0x8a, 0xff, 0x00, 0x20, 0x49, 0x66, 0x64, 0x5b, 0x02,
-	0x1b, 0xad, 0xb6, 0xca, 0xaf, 0xaf, 0x6a, 0x5c, 0x92, 0x58, 0x47, 0x31, 0x52, 0x97, 0x75, 0x2c,
-	0xe9, 0x05, 0x93, 0x48, 0x5b, 0xa7, 0xa5, 0x7d, 0x88, 0x45, 0x39, 0xa6, 0x0b, 0xa1, 0xd5, 0x95,
-	0xd6, 0xea, 0xc6, 0x3b, 0xb0, 0x85, 0x11, 0x81, 0x1f, 0x6d, 0x04, 0x7e, 0x80, 0xa1, 0x7b, 0xdb,
-	0xd0, 0x2e, 0x8d, 0x7f, 0x64, 0x36, 0x1a, 0xbb, 0xe1, 0x62, 0xb5, 0x55, 0x09, 0x73, 0x7b, 0x25,
-	0xff, 0x9a, 0xbe, 0x02, 0xec, 0x3b, 0x38, 0x7b, 0xdb, 0x9d, 0x12, 0x71, 0x4b, 0x46, 0x3a, 0x94,
-	0xbe, 0x67, 0xe0, 0x4e, 0x92, 0x5a, 0x6f, 0x19, 0xcc, 0x7a, 0x4b, 0xc7, 0xa1, 0xb2, 0x62, 0xfe,
-	0x6e, 0x56, 0x4f, 0xa0, 0x94, 0x54, 0x1b, 0xe0, 0x12, 0xf3, 0xf5, 0x83, 0xb3, 0xe3, 0x1d, 0xb6,
-	0x8b, 0x94, 0x33, 0xd6, 0xe0, 0xed, 0x6d, 0xf5, 0x73, 0x1e, 0x60, 0x60, 0x8e, 0x9d, 0xa4, 0xf9,
-	0xe5, 0x8c, 0xa7, 0x45, 0x2a, 0xf8, 0x06, 0xfa, 0xdf, 0x3b, 0x9a, 0x7f, 0x1b, 0x20, 0x8c, 0xd2,
-	0x8d, 0x63, 0x15, 0x48, 0x2c, 0x8e, 0xcc, 0x90, 0x60, 0x15, 0xc8, 0x5f, 0xa0, 0x95, 0x50, 0x24,
-	0xf3, 0xd1, 0x2d, 0x7f, 0x04, 0x05, 0x6c, 0xec, 0x25, 0x12, 0xf6, 0xc9, 0x67, 0x31, 0x1e, 0x44,
-	0x9b, 0x19, 0x37, 0xc6, 0x63, 0xba, 0x31, 0x88, 0x99, 0x37, 0x6a, 0xd0, 0x6d, 0xf1, 0x08, 0x8a,
-	0x6d, 0x43, 0x6d, 0x0e, 0xd4, 0xb4, 0x31, 0xb2, 0x58, 0x7b, 0x81, 0xcc, 0x10, 0x45, 0xd4, 0xb0,
-	0xa7, 0x44, 0x54, 0x6e, 0x17, 0x35, 0xf4, 0xad, 0x84, 0x52, 0x54, 0x4d, 0xc5, 0x54, 0x7e, 0x17,
-	0xa5, 0x20, 0x07, 0x85, 0xdb, 0xed, 0xf3, 0x2b, 0x76, 0x5f, 0x6b, 0xe9, 0x5c, 0xf4, 0x57, 0xee,
-	0x24, 0x3d, 0x1c, 0xfe, 0x41, 0xf7, 0x9d, 0xc0, 0xc1, 0xd2, 0x0d, 0x3c, 0xc7, 0x9e, 0xd8, 0x21,
-	0xb2, 0xc8, 0x8e, 0x97, 0x0c, 0x7a, 0xea, 0xf6, 0x3d, 0x14, 0x29, 0xf3, 0xb2, 0xd8, 0xbc, 0x1c,
-	0xe5, 0x51, 0xdc, 0x51, 0xbe, 0xb9, 0x72, 0x3c, 0xd3, 0x22, 0xdb, 0x75, 0x68, 0xa4, 0x43, 0xe9,
-	0x6b, 0x5c, 0x53, 0xdb, 0xc3, 0xb9, 0x2c, 0x5d, 0x2b, 0xad, 0x49, 0x81, 0xd2, 0x3c, 0xbe, 0x0d,
-	0x70, 0x55, 0x51, 0x1b, 0xd4, 0x29, 0xa7, 0x6e, 0xd1, 0x72, 0xdf, 0x9e, 0xfb, 0x0e, 0x4a, 0x46,
-	0xc6, 0xfa, 0x49, 0xf1, 0x7d, 0x28, 0x67, 0x96, 0xa2, 0x24, 0x7a, 0x49, 0x12, 0x4c, 0x26, 0x89,
-	0xc6, 0x4f, 0x39, 0x38, 0xa0, 0xce, 0x52, 0xfe, 0x5d, 0xda, 0x10, 0xe4, 0xf8, 0xa0, 0x56, 0x53,
-	0x37, 0xc8, 0x50, 0xd6, 0xd5, 0xc1, 0xa7, 0x5d, 0xe3, 0xd9, 0x48, 0x3d, 0x57, 0xf5, 0x01, 0x36,
-	0x05, 0xf9, 0xa8, 0x52, 0x68, 0xe6, 0x3c, 0x69, 0xc0, 0xc1, 0xa0, 0xd9, 0xd2, 0xd4, 0x84, 0x4e,
-	0x3e, 0x9b, 0x14, 0x4d, 0xf5, 0xe9, 0x63, 0xe0, 0x7a, 0xc3, 0xfe, 0x27, 0xa3, 0xde, 0x50, 0xd3,
-	0xb0, 0x41, 0x8e, 0x31, 0x79, 0x97, 0x22, 0xd7, 0xdf, 0x1e, 0xcc, 0xb5, 0x86, 0xda, 0xb3, 0x51,
-	0xff, 0x33, 0xbd, 0x5d, 0x61, 0x6f, 0x70, 0xa9, 0x59, 0xf0, 0xa9, 0x5a, 0x6a, 0x77, 0x9f, 0xf7,
-	0xba, 0x43, 0x5d, 0xa9, 0x14, 0x6e, 0x60, 0xa9, 0xa2, 0xf8, 0x84, 0x00, 0xbd, 0xab, 0xa4, 0x19,
-	0x16, 0x63, 0x63, 0xd2, 0xf5, 0xa4, 0x87, 0xa8, 0x78, 0x37, 0x31, 0x26, 0x2d, 0x5b, 0x4b, 0xf8,
-	0xed, 0xba, 0xba, 0xf7, 0xe7, 0x75, 0x95, 0xf9, 0xea, 0x75, 0x95, 0x79, 0x85, 0xaf, 0x5f, 0xf0,
-	0xf5, 0x07, 0xbe, 0xc6, 0x45, 0xf2, 0xd3, 0xe6, 0xc9, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x21,
-	0x78, 0x72, 0xc3, 0x0e, 0x09, 0x00, 0x00,
+	// 953 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x96, 0xcd, 0x6e, 0xe3, 0x54,
+	0x14, 0xc7, 0x7b, 0xf3, 0xd5, 0xe4, 0x34, 0xa5, 0xe6, 0x4e, 0x67, 0xc6, 0xe3, 0x81, 0xc4, 0x98,
+	0x99, 0x2a, 0x53, 0x41, 0x8a, 0x3a, 0x4f, 0xd0, 0x24, 0x16, 0x64, 0x26, 0xe3, 0x44, 0x6e, 0x52,
+	0xc4, 0x2a, 0xba, 0xad, 0x2f, 0xa9, 0x55, 0xc7, 0xb6, 0x6c, 0x27, 0x28, 0x2b, 0x10, 0xab, 0x51,
+	0x16, 0xbc, 0x41, 0x56, 0xc3, 0x9a, 0x07, 0x40, 0x2c, 0x59, 0xcc, 0x82, 0x05, 0xec, 0x10, 0x8b,
+	0x88, 0xe6, 0x09, 0x78, 0x04, 0xe4, 0x6b, 0x3b, 0xb9, 0x49, 0xab, 0x91, 0x10, 0x23, 0xc1, 0x26,
+	0xb9, 0x1f, 0xbf, 0x1c, 0x9f, 0xf3, 0xf7, 0xff, 0xdc, 0x1b, 0xd8, 0xb3, 0x69, 0xf0, 0x95, 0xe3,
+	0x5d, 0x19, 0xe7, 0x55, 0xd7, 0x73, 0x02, 0x07, 0x17, 0x96, 0x0b, 0xd2, 0xfe, 0xc0, 0x19, 0x38,
+	0x6c, 0xf5, 0x28, 0x1c, 0x45, 0x80, 0xd2, 0x86, 0xdd, 0x4f, 0x1d, 0xdf, 0x37, 0xdd, 0x17, 0xd4,
+	0xf7, 0xc9, 0x80, 0xe2, 0x43, 0xc8, 0x04, 0x13, 0x97, 0x8a, 0x48, 0x46, 0x95, 0x77, 0x8e, 0xef,
+	0x55, 0x57, 0x11, 0x63, 0xa2, 0x3b, 0x71, 0xa9, 0xce, 0x18, 0x8c, 0x21, 0x63, 0x90, 0x80, 0x88,
+	0x29, 0x19, 0x55, 0x8a, 0x3a, 0x1b, 0x2b, 0xaf, 0x52, 0x50, 0xd0, 0x1c, 0x83, 0xaa, 0x63, 0x6a,
+	0x07, 0xf8, 0xe3, 0xb5, 0x68, 0x0f, 0xb8, 0x68, 0x4b, 0xa6, 0xca, 0x05, 0x6c, 0x42, 0xce, 0xea,
+	0x07, 0xe6, 0x90, 0xb2, 0x90, 0x99, 0xda, 0xf1, 0xeb, 0x79, 0x79, 0xeb, 0x8f, 0x79, 0xf9, 0x70,
+	0x60, 0x06, 0x97, 0xa3, 0xf3, 0xea, 0x85, 0x33, 0x3c, 0xba, 0x24, 0xfe, 0xa5, 0x79, 0xe1, 0x78,
+	0xee, 0x91, 0x4f, 0xbd, 0x2f, 0xd9, 0x47, 0xb5, 0x45, 0x86, 0xae, 0xe3, 0x05, 0x5d, 0x73, 0x48,
+	0xf5, 0xac, 0x15, 0x7e, 0xe1, 0x87, 0x50, 0xb0, 0x1d, 0x83, 0xf6, 0x6d, 0x32, 0xa4, 0x62, 0x5a,
+	0x46, 0x95, 0x82, 0x9e, 0x0f, 0x17, 0x34, 0x32, 0xa4, 0xca, 0xd7, 0x90, 0x09, 0x9f, 0x8a, 0x1f,
+	0xc3, 0x76, 0x53, 0x3b, 0x3b, 0x69, 0x35, 0x1b, 0xc2, 0x96, 0x24, 0x4e, 0x67, 0xf2, 0xfe, 0x32,
+	0xad, 0x70, 0xbf, 0x69, 0x8f, 0x89, 0x65, 0x1a, 0xb8, 0x0c, 0x99, 0x67, 0xed, 0xa6, 0x26, 0x20,
+	0xe9, 0xee, 0x74, 0x26, 0xbf, 0xbb, 0xc6, 0x3c, 0x73, 0x4c, 0x1b, 0x7f, 0x00, 0xd9, 0x96, 0x7a,
+	0x72, 0xa6, 0x0a, 0x29, 0xe9, 0xde, 0x74, 0x26, 0xe3, 0x35, 0xa2, 0x45, 0xc9, 0x98, 0x4a, 0xc5,
+	0x97, 0xaf, 0x4a, 0x5b, 0x3f, 0x7e, 0x5f, 0x62, 0x0f, 0x56, 0xae, 0x53, 0x50, 0xd4, 0x22, 0x2d,
+	0x22, 0xa1, 0x3e, 0x59, 0x13, 0xea, 0x3d, 0x5e, 0x28, 0x0e, 0xfb, 0x0f, 0xb4, 0xc2, 0x1f, 0x01,
+	0xc4, 0xc9, 0xf4, 0x4d, 0x43, 0xcc, 0x84, 0xbb, 0xb5, 0xdd, 0xc5, 0xbc, 0x5c, 0x88, 0x13, 0x6b,
+	0x36, 0xf4, 0xc4, 0x65, 0x4d, 0x43, 0x79, 0x89, 0x62, 0x69, 0x2b, 0xbc, 0xb4, 0x0f, 0xa7, 0x33,
+	0xf9, 0x3e, 0x5f, 0x08, 0xaf, 0xae, 0xb2, 0x54, 0x37, 0x7a, 0x03, 0x1b, 0x18, 0x13, 0xf8, 0xd1,
+	0x4a, 0xe0, 0x07, 0xd3, 0x99, 0x7c, 0x77, 0x13, 0xba, 0x4d, 0xe3, 0x5f, 0xd0, 0x4a, 0x63, 0x3b,
+	0xf0, 0x26, 0x1b, 0x95, 0xa0, 0x37, 0x57, 0xf2, 0x36, 0xf5, 0x7d, 0x72, 0x43, 0xdf, 0x5a, 0x71,
+	0x31, 0x2f, 0xe7, 0xb5, 0x58, 0x63, 0x4e, 0x6d, 0x11, 0xb6, 0x2d, 0x4a, 0xc6, 0xa6, 0x3d, 0x60,
+	0x52, 0xe7, 0xf5, 0x64, 0xaa, 0xfc, 0x84, 0x60, 0x2f, 0x4e, 0xb4, 0x33, 0xf2, 0x2f, 0x3b, 0x23,
+	0xcb, 0xe2, 0x72, 0x44, 0xff, 0x36, 0xc7, 0xa7, 0x90, 0x8f, 0x6b, 0xf7, 0xc5, 0x94, 0x9c, 0xae,
+	0xec, 0x1c, 0xdf, 0xbf, 0xc5, 0x84, 0xa1, 0x8e, 0xfa, 0x12, 0xfc, 0x07, 0x85, 0x29, 0xdf, 0x65,
+	0x00, 0xba, 0xe4, 0xdc, 0x8a, 0x0f, 0x86, 0xea, 0x9a, 0xdf, 0x25, 0xee, 0x51, 0x2b, 0xe8, 0x7f,
+	0xef, 0x76, 0xfc, 0x3e, 0x40, 0x10, 0xa6, 0x1b, 0xc5, 0xca, 0xb2, 0x58, 0x05, 0xb6, 0xc2, 0x82,
+	0x09, 0x90, 0xbe, 0xa2, 0x13, 0x31, 0xc7, 0xd6, 0xc3, 0x21, 0xde, 0x87, 0xec, 0x98, 0x58, 0x23,
+	0x2a, 0x6e, 0xb3, 0x23, 0x33, 0x9a, 0xe0, 0x1a, 0x60, 0x8f, 0xfa, 0xa6, 0x31, 0x22, 0x56, 0xdf,
+	0xa3, 0xc4, 0x8d, 0x0a, 0xcd, 0xcb, 0xa8, 0x92, 0xad, 0xed, 0x2f, 0xe6, 0x65, 0x41, 0x8f, 0x77,
+	0x75, 0x4a, 0x5c, 0x56, 0x8a, 0xe0, 0x6d, 0xac, 0x28, 0x3f, 0x24, 0x8d, 0x77, 0xc0, 0x37, 0x1e,
+	0x6b, 0x96, 0x95, 0xa2, 0x7c, 0xdb, 0x3d, 0x82, 0x5c, 0x5d, 0x57, 0x4f, 0xba, 0x6a, 0xd2, 0x78,
+	0xeb, 0x58, 0xdd, 0xa3, 0x24, 0xa0, 0x21, 0xd5, 0xeb, 0x34, 0x42, 0x2a, 0x75, 0x1b, 0xd5, 0x73,
+	0x8d, 0x98, 0x6a, 0xa8, 0x2d, 0xb5, 0xab, 0x0a, 0xe9, 0xdb, 0xa8, 0x06, 0xb5, 0x68, 0xb0, 0xd9,
+	0x9e, 0xbf, 0x21, 0xd8, 0xab, 0x8d, 0xac, 0xab, 0xd3, 0x89, 0x7d, 0x91, 0x5c, 0x3e, 0x6f, 0xd1,
+	0xcf, 0x32, 0xec, 0x8c, 0x6c, 0xdf, 0xb1, 0xcc, 0x0b, 0x33, 0xa0, 0x06, 0x73, 0x4d, 0x5e, 0xe7,
+	0x97, 0xde, 0xec, 0x03, 0x89, 0x6b, 0x87, 0x8c, 0x9c, 0x66, 0x7b, 0x89, 0xeb, 0x45, 0xd8, 0x76,
+	0xc9, 0xc4, 0x72, 0x88, 0xc1, 0x5e, 0x79, 0x51, 0x4f, 0xa6, 0xca, 0xb7, 0x08, 0xf6, 0xea, 0xce,
+	0xd0, 0x75, 0x46, 0xb6, 0x91, 0xd4, 0xd4, 0x80, 0xfc, 0x30, 0x1a, 0xfa, 0x22, 0x62, 0x8d, 0x55,
+	0xe1, 0xdc, 0xbe, 0x41, 0x57, 0x4f, 0xcd, 0xa1, 0x6b, 0xd1, 0x78, 0xa6, 0x2f, 0x7f, 0x29, 0x3d,
+	0x81, 0xdd, 0xb5, 0xad, 0x30, 0x89, 0x4e, 0x9c, 0x04, 0x8a, 0x92, 0x88, 0xa7, 0x87, 0x3f, 0xa7,
+	0x60, 0x87, 0xbb, 0xab, 0xf1, 0x87, 0xbc, 0x21, 0xd8, 0xf5, 0xc4, 0xed, 0x26, 0x6e, 0xa8, 0xc2,
+	0xae, 0xa6, 0x76, 0x3f, 0x6f, 0xeb, 0xcf, 0xfb, 0xea, 0x99, 0xaa, 0x75, 0x05, 0x14, 0x1d, 0xda,
+	0x1c, 0xba, 0x76, 0x5f, 0x1d, 0xc2, 0x4e, 0xf7, 0xa4, 0xd6, 0x52, 0x63, 0x3a, 0x3e, 0x96, 0x39,
+	0x9a, 0xeb, 0xf5, 0x03, 0x28, 0x74, 0x7a, 0xa7, 0x9f, 0xf5, 0x3b, 0xbd, 0x56, 0x4b, 0x48, 0x4b,
+	0xf7, 0xa7, 0x33, 0xf9, 0x0e, 0x47, 0x2e, 0x4f, 0xb3, 0x03, 0x28, 0xd4, 0x7a, 0xad, 0xe7, 0xfd,
+	0xd3, 0x2f, 0xb4, 0xba, 0x90, 0xb9, 0xc1, 0x25, 0x66, 0xc1, 0x8f, 0x21, 0x5f, 0x6f, 0xbf, 0xe8,
+	0xb4, 0x7b, 0x5a, 0x43, 0xc8, 0xde, 0xc0, 0x12, 0x45, 0x71, 0x05, 0x40, 0x6b, 0x37, 0x92, 0x0c,
+	0x73, 0x91, 0x31, 0xf9, 0x7a, 0x92, 0x4b, 0x5a, 0xba, 0x13, 0x1b, 0x93, 0x97, 0xad, 0x26, 0xfe,
+	0x7e, 0x5d, 0xda, 0xfa, 0xeb, 0xba, 0x84, 0xbe, 0x59, 0x94, 0xd0, 0xeb, 0x45, 0x09, 0xfd, 0xba,
+	0x28, 0xa1, 0x3f, 0x17, 0x25, 0x74, 0x9e, 0x63, 0x7f, 0x9d, 0x9e, 0xfe, 0x1d, 0x00, 0x00, 0xff,
+	0xff, 0x92, 0x82, 0xdb, 0x1a, 0x6e, 0x09, 0x00, 0x00,
 }
diff --git a/vendor/github.com/docker/libnetwork/networkdb/networkdb.proto b/vendor/github.com/docker/libnetwork/networkdb/networkdb.proto
index 7df1b42..0b8490b 100644
--- a/vendor/github.com/docker/libnetwork/networkdb/networkdb.proto
+++ b/vendor/github.com/docker/libnetwork/networkdb/networkdb.proto
@@ -109,7 +109,7 @@
 	// network event was recorded.
 	uint64 l_time = 2 [(gogoproto.customtype) = "github.com/hashicorp/serf/serf.LamportTime", (gogoproto.nullable) = false];
 	// Source node name where this network attachment happened.
-	string node_name = 3;
+	string node_name = 3 [(gogoproto.customname) = "NodeName"];
 	// Indicates if a leave from this network is in progress.
 	bool leaving = 4;
 }
@@ -119,6 +119,8 @@
 	// Lamport time when this push pull was initiated.
 	uint64 l_time = 1 [(gogoproto.customtype) = "github.com/hashicorp/serf/serf.LamportTime", (gogoproto.nullable) = false];
 	repeated NetworkEntry networks = 2;
+	// Name of the node sending this push pull payload.
+	string node_name = 3 [(gogoproto.customname) = "NodeName"];
 }
 
 // TableEvent message payload definition.
@@ -152,6 +154,8 @@
 	string key = 6;
 	// Entry value.
 	bytes value = 7;
+	// Residual reap time for the entry before getting deleted in seconds
+	int32 residual_reap_time = 8 [(gogoproto.customname) = "ResidualReapTime"];
 }
 
 // BulkSync message payload definition.
@@ -180,4 +184,4 @@
 
 	// A list of simple messages.
 	repeated SimpleMessage messages = 1;
-}
\ No newline at end of file
+}
diff --git a/vendor/github.com/docker/libnetwork/osl/namespace_windows.go b/vendor/github.com/docker/libnetwork/osl/namespace_windows.go
index bfdca30..49503c0 100644
--- a/vendor/github.com/docker/libnetwork/osl/namespace_windows.go
+++ b/vendor/github.com/docker/libnetwork/osl/namespace_windows.go
@@ -5,12 +5,7 @@
 // GenerateKey generates a sandbox key based on the passed
 // container id.
 func GenerateKey(containerID string) string {
-	maxLen := 12
-	if len(containerID) < maxLen {
-		maxLen = len(containerID)
-	}
-
-	return containerID[:maxLen]
+	return containerID
 }
 
 // NewSandbox provides a new sandbox instance created in an os specific way
diff --git a/vendor/github.com/docker/libnetwork/osl/neigh_linux.go b/vendor/github.com/docker/libnetwork/osl/neigh_linux.go
index 4e47948..6bf1c16 100644
--- a/vendor/github.com/docker/libnetwork/osl/neigh_linux.go
+++ b/vendor/github.com/docker/libnetwork/osl/neigh_linux.go
@@ -9,6 +9,17 @@
 	"github.com/vishvananda/netlink"
 )
 
+// NeighborSearchError indicates that the neighbor is already present
+type NeighborSearchError struct {
+	ip      net.IP
+	mac     net.HardwareAddr
+	present bool
+}
+
+func (n NeighborSearchError) Error() string {
+	return fmt.Sprintf("Search neighbor failed for IP %v, mac %v, present in db:%t", n.ip, n.mac, n.present)
+}
+
 // NeighOption is a function option type to set interface options
 type NeighOption func(nh *neigh)
 
@@ -41,7 +52,7 @@
 
 	nh := n.findNeighbor(dstIP, dstMac)
 	if nh == nil {
-		return fmt.Errorf("could not find the neighbor entry to delete")
+		return NeighborSearchError{dstIP, dstMac, false}
 	}
 
 	if osDelete {
@@ -103,26 +114,27 @@
 		}
 	}
 	n.Unlock()
-	logrus.Debugf("Neighbor entry deleted for IP %v, mac %v", dstIP, dstMac)
+	logrus.Debugf("Neighbor entry deleted for IP %v, mac %v osDelete:%t", dstIP, dstMac, osDelete)
 
 	return nil
 }
 
 func (n *networkNamespace) AddNeighbor(dstIP net.IP, dstMac net.HardwareAddr, force bool, options ...NeighOption) error {
 	var (
-		iface netlink.Link
-		err   error
+		iface                  netlink.Link
+		err                    error
+		neighborAlreadyPresent bool
 	)
 
 	// If the namespace already has the neighbor entry but the AddNeighbor is called
 	// because of a miss notification (force flag) program the kernel anyway.
 	nh := n.findNeighbor(dstIP, dstMac)
 	if nh != nil {
+		neighborAlreadyPresent = true
+		logrus.Warnf("Neighbor entry already present for IP %v, mac %v neighbor:%+v forceUpdate:%t", dstIP, dstMac, nh, force)
 		if !force {
-			logrus.Warnf("Neighbor entry already present for IP %v, mac %v", dstIP, dstMac)
-			return nil
+			return NeighborSearchError{dstIP, dstMac, true}
 		}
-		logrus.Warnf("Force kernel update, Neighbor entry already present for IP %v, mac %v", dstIP, dstMac)
 	}
 
 	nh = &neigh{
@@ -146,8 +158,7 @@
 	if nh.linkDst != "" {
 		iface, err = nlh.LinkByName(nh.linkDst)
 		if err != nil {
-			return fmt.Errorf("could not find interface with destination name %s: %v",
-				nh.linkDst, err)
+			return fmt.Errorf("could not find interface with destination name %s: %v", nh.linkDst, err)
 		}
 	}
 
@@ -167,13 +178,17 @@
 	}
 
 	if err := nlh.NeighSet(nlnh); err != nil {
-		return fmt.Errorf("could not add neighbor entry: %v", err)
+		return fmt.Errorf("could not add neighbor entry:%+v error:%v", nlnh, err)
+	}
+
+	if neighborAlreadyPresent {
+		return nil
 	}
 
 	n.Lock()
 	n.neighbors = append(n.neighbors, nh)
 	n.Unlock()
-	logrus.Debugf("Neighbor entry added for IP %v, mac %v", dstIP, dstMac)
+	logrus.Debugf("Neighbor entry added for IP:%v, mac:%v on ifc:%s", dstIP, dstMac, nh.linkName)
 
 	return nil
 }
diff --git a/vendor/github.com/docker/libnetwork/portallocator/portallocator_freebsd.go b/vendor/github.com/docker/libnetwork/portallocator/portallocator_freebsd.go
new file mode 100644
index 0000000..97d7fbb
--- /dev/null
+++ b/vendor/github.com/docker/libnetwork/portallocator/portallocator_freebsd.go
@@ -0,0 +1,42 @@
+package portallocator
+
+import (
+	"bytes"
+	"fmt"
+	"os/exec"
+)
+
+func getDynamicPortRange() (start int, end int, err error) {
+	portRangeKernelSysctl := []string{"net.inet.ip.portrange.hifirst", "net.inet.ip.portrange.hilast"}
+	portRangeFallback := fmt.Sprintf("using fallback port range %d-%d", DefaultPortRangeStart, DefaultPortRangeEnd)
+	portRangeLowCmd := exec.Command("/sbin/sysctl", portRangeKernelSysctl[0])
+	var portRangeLowOut bytes.Buffer
+	portRangeLowCmd.Stdout = &portRangeLowOut
+	cmdErr := portRangeLowCmd.Run()
+	if cmdErr != nil {
+		return 0, 0, fmt.Errorf("port allocator - sysctl net.inet.ip.portrange.hifirst failed - %s: %v", portRangeFallback, cmdErr)
+	}
+	n, err := fmt.Sscanf(portRangeLowOut.String(), "%d", &start)
+	if n != 1 || err != nil {
+		if err == nil {
+			err = fmt.Errorf("unexpected count of parsed numbers (%d)", n)
+		}
+		return 0, 0, fmt.Errorf("port allocator - failed to parse system ephemeral port range start from %s - %s: %v", portRangeLowOut.String(), portRangeFallback, err)
+	}
+
+	portRangeHighCmd := exec.Command("/sbin/sysctl", portRangeKernelSysctl[1])
+	var portRangeHighOut bytes.Buffer
+	portRangeHighCmd.Stdout = &portRangeHighOut
+	cmdErr = portRangeHighCmd.Run()
+	if cmdErr != nil {
+		return 0, 0, fmt.Errorf("port allocator - sysctl net.inet.ip.portrange.hilast failed - %s: %v", portRangeFallback, cmdErr)
+	}
+	n, err = fmt.Sscanf(portRangeHighOut.String(), "%d", &end)
+	if n != 1 || err != nil {
+		if err == nil {
+			err = fmt.Errorf("unexpected count of parsed numbers (%d)", n)
+		}
+		return 0, 0, fmt.Errorf("port allocator - failed to parse system ephemeral port range end from %s - %s: %v", portRangeHighOut.String(), portRangeFallback, err)
+	}
+	return start, end, nil
+}
diff --git a/vendor/github.com/docker/libnetwork/sandbox.go b/vendor/github.com/docker/libnetwork/sandbox.go
index 6f4c250..315195e 100644
--- a/vendor/github.com/docker/libnetwork/sandbox.go
+++ b/vendor/github.com/docker/libnetwork/sandbox.go
@@ -916,6 +916,13 @@
 			break
 		}
 	}
+
+	if index == -1 {
+		logrus.Warnf("Endpoint %s has already been deleted", ep.Name())
+		sb.Unlock()
+		return nil
+	}
+
 	heap.Remove(&sb.endpoints, index)
 	for _, e := range sb.endpoints {
 		if len(e.Gateway()) > 0 {
diff --git a/vendor/github.com/docker/libnetwork/sandbox_dns_unix.go b/vendor/github.com/docker/libnetwork/sandbox_dns_unix.go
index f18f0b3..afa3e79 100644
--- a/vendor/github.com/docker/libnetwork/sandbox_dns_unix.go
+++ b/vendor/github.com/docker/libnetwork/sandbox_dns_unix.go
@@ -67,11 +67,7 @@
 		return err
 	}
 
-	if err := sb.setupDNS(); err != nil {
-		return err
-	}
-
-	return nil
+	return sb.setupDNS()
 }
 
 func (sb *sandbox) buildHostsFile() error {
@@ -197,14 +193,26 @@
 	// This is for the host mode networking
 	if sb.config.originResolvConfPath != "" {
 		if err := copyFile(sb.config.originResolvConfPath, sb.config.resolvConfPath); err != nil {
-			return fmt.Errorf("could not copy source resolv.conf file %s to %s: %v", sb.config.originResolvConfPath, sb.config.resolvConfPath, err)
+			if !os.IsNotExist(err) {
+				return fmt.Errorf("could not copy source resolv.conf file %s to %s: %v", sb.config.originResolvConfPath, sb.config.resolvConfPath, err)
+			}
+			logrus.Infof("%s does not exist, we create an empty resolv.conf for container", sb.config.originResolvConfPath)
+			if err := createFile(sb.config.resolvConfPath); err != nil {
+				return err
+			}
 		}
 		return nil
 	}
 
 	currRC, err := resolvconf.Get()
 	if err != nil {
-		return err
+		if !os.IsNotExist(err) {
+			return err
+		}
+		// it's ok to continue if /etc/resolv.conf doesn't exist, default resolvers (Google's Public DNS)
+		// will be used
+		currRC = &resolvconf.File{}
+		logrus.Infof("/etc/resolv.conf does not exist")
 	}
 
 	if len(sb.config.dnsList) > 0 || len(sb.config.dnsSearchList) > 0 || len(sb.config.dnsOptionsList) > 0 {
diff --git a/vendor/github.com/docker/libnetwork/service.go b/vendor/github.com/docker/libnetwork/service.go
index 5a0d7e0..63095f3 100644
--- a/vendor/github.com/docker/libnetwork/service.go
+++ b/vendor/github.com/docker/libnetwork/service.go
@@ -89,4 +89,5 @@
 
 	// Back pointer to service to which the loadbalancer belongs.
 	service *service
+	sync.Mutex
 }
diff --git a/vendor/github.com/docker/libnetwork/service_common.go b/vendor/github.com/docker/libnetwork/service_common.go
index 64d283e..4411c97 100644
--- a/vendor/github.com/docker/libnetwork/service_common.go
+++ b/vendor/github.com/docker/libnetwork/service_common.go
@@ -161,6 +161,19 @@
 	return int(lb.fwMark)
 }
 
+// cleanupServiceDiscovery when the network is being deleted, erase all the associated service discovery records
+func (c *controller) cleanupServiceDiscovery(cleanupNID string) {
+	c.Lock()
+	defer c.Unlock()
+	if cleanupNID == "" {
+		logrus.Debugf("cleanupServiceDiscovery for all networks")
+		c.svcRecords = make(map[string]svcInfo)
+		return
+	}
+	logrus.Debugf("cleanupServiceDiscovery for network:%s", cleanupNID)
+	delete(c.svcRecords, cleanupNID)
+}
+
 func (c *controller) cleanupServiceBindings(cleanupNID string) {
 	var cleanupFuncs []func()
 
@@ -184,15 +197,6 @@
 				continue
 			}
 
-			// The network is being deleted, erase all the associated service discovery records
-			// TODO(fcrisciani) separate the Load Balancer from the Service discovery, this operation
-			// can be done safely here, but the rmServiceBinding is still keeping consistency in the
-			// data structures that are tracking the endpoint to IP mapping.
-			c.Lock()
-			logrus.Debugf("cleanupServiceBindings erasing the svcRecords for %s", nid)
-			delete(c.svcRecords, nid)
-			c.Unlock()
-
 			for eid, ip := range lb.backEnds {
 				epID := eid
 				epIP := ip
@@ -287,7 +291,7 @@
 	// Add loadbalancer service and backend in all sandboxes in
 	// the network only if vip is valid.
 	if len(vip) != 0 {
-		n.(*network).addLBBackend(ip, vip, lb.fwMark, ingressPorts)
+		n.(*network).addLBBackend(ip, vip, lb, ingressPorts)
 	}
 
 	// Add the appropriate name resolutions
@@ -355,7 +359,7 @@
 	// Remove loadbalancer service(if needed) and backend in all
 	// sandboxes in the network only if the vip is valid.
 	if len(vip) != 0 && entries == 0 {
-		n.(*network).rmLBBackend(ip, vip, lb.fwMark, ingressPorts, rmService)
+		n.(*network).rmLBBackend(ip, vip, lb, ingressPorts, rmService)
 	}
 
 	// Delete the name resolutions
diff --git a/vendor/github.com/docker/libnetwork/service_linux.go b/vendor/github.com/docker/libnetwork/service_linux.go
index 0066594..d8a95b2 100644
--- a/vendor/github.com/docker/libnetwork/service_linux.go
+++ b/vendor/github.com/docker/libnetwork/service_linux.go
@@ -111,7 +111,7 @@
 
 // Add loadbalancer backend to all sandboxes which has a connection to
 // this network. If needed add the service as well.
-func (n *network) addLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig) {
+func (n *network) addLBBackend(ip, vip net.IP, lb *loadBalancer, ingressPorts []*PortConfig) {
 	n.WalkEndpoints(func(e Endpoint) bool {
 		ep := e.(*endpoint)
 		if sb, ok := ep.getSandbox(); ok {
@@ -124,7 +124,7 @@
 				gwIP = ep.Iface().Address().IP
 			}
 
-			sb.addLBBackend(ip, vip, fwMark, ingressPorts, ep.Iface().Address(), gwIP, n.ingress)
+			sb.addLBBackend(ip, vip, lb.fwMark, ingressPorts, ep.Iface().Address(), gwIP, n.ingress)
 		}
 
 		return false
@@ -134,7 +134,7 @@
 // Remove loadbalancer backend from all sandboxes which has a
 // connection to this network. If needed remove the service entry as
 // well, as specified by the rmService bool.
-func (n *network) rmLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, rmService bool) {
+func (n *network) rmLBBackend(ip, vip net.IP, lb *loadBalancer, ingressPorts []*PortConfig, rmService bool) {
 	n.WalkEndpoints(func(e Endpoint) bool {
 		ep := e.(*endpoint)
 		if sb, ok := ep.getSandbox(); ok {
@@ -147,7 +147,7 @@
 				gwIP = ep.Iface().Address().IP
 			}
 
-			sb.rmLBBackend(ip, vip, fwMark, ingressPorts, ep.Iface().Address(), gwIP, rmService, n.ingress)
+			sb.rmLBBackend(ip, vip, lb.fwMark, ingressPorts, ep.Iface().Address(), gwIP, rmService, n.ingress)
 		}
 
 		return false
diff --git a/vendor/github.com/docker/libnetwork/service_windows.go b/vendor/github.com/docker/libnetwork/service_windows.go
index 6fe521e..9ed3e06 100644
--- a/vendor/github.com/docker/libnetwork/service_windows.go
+++ b/vendor/github.com/docker/libnetwork/service_windows.go
@@ -1,11 +1,146 @@
 package libnetwork
 
-import "net"
+import (
+	"net"
 
-func (n *network) addLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig) {
+	"github.com/Microsoft/hcsshim"
+	"github.com/docker/docker/pkg/system"
+	"github.com/sirupsen/logrus"
+)
+
+type policyLists struct {
+	ilb *hcsshim.PolicyList
+	elb *hcsshim.PolicyList
 }
 
-func (n *network) rmLBBackend(ip, vip net.IP, fwMark uint32, ingressPorts []*PortConfig, rmService bool) {
+var lbPolicylistMap map[*loadBalancer]*policyLists
+
+func init() {
+	lbPolicylistMap = make(map[*loadBalancer]*policyLists)
+}
+
+func (n *network) addLBBackend(ip, vip net.IP, lb *loadBalancer, ingressPorts []*PortConfig) {
+
+	if system.GetOSVersion().Build > 16236 {
+		lb.Lock()
+		defer lb.Unlock()
+		//find the load balancer IP for the network.
+		var sourceVIP string
+		for _, e := range n.Endpoints() {
+			epInfo := e.Info()
+			if epInfo == nil {
+				continue
+			}
+			if epInfo.LoadBalancer() {
+				sourceVIP = epInfo.Iface().Address().IP.String()
+				break
+			}
+		}
+
+		if sourceVIP == "" {
+			logrus.Errorf("Failed to find load balancer IP for network %s", n.Name())
+			return
+		}
+
+		var endpoints []hcsshim.HNSEndpoint
+
+		for eid := range lb.backEnds {
+			//Call HNS to get back ID (GUID) corresponding to the endpoint.
+			hnsEndpoint, err := hcsshim.GetHNSEndpointByName(eid)
+			if err != nil {
+				logrus.Errorf("Failed to find HNS ID for endpoint %v: %v", eid, err)
+				return
+			}
+
+			endpoints = append(endpoints, *hnsEndpoint)
+		}
+
+		if policies, ok := lbPolicylistMap[lb]; ok {
+
+			if policies.ilb != nil {
+				policies.ilb.Delete()
+				policies.ilb = nil
+			}
+
+			if policies.elb != nil {
+				policies.elb.Delete()
+				policies.elb = nil
+			}
+			delete(lbPolicylistMap, lb)
+		}
+
+		ilbPolicy, err := hcsshim.AddLoadBalancer(endpoints, true, sourceVIP, vip.String(), 0, 0, 0)
+		if err != nil {
+			logrus.Errorf("Failed to add ILB policy for service %s (%s) with endpoints %v using load balancer IP %s on network %s: %v",
+				lb.service.name, vip.String(), endpoints, sourceVIP, n.Name(), err)
+			return
+		}
+
+		lbPolicylistMap[lb] = &policyLists{
+			ilb: ilbPolicy,
+		}
+
+		publishedPorts := make(map[uint32]uint32)
+
+		for i, port := range ingressPorts {
+			protocol := uint16(6)
+
+			// Skip already published port
+			if publishedPorts[port.PublishedPort] == port.TargetPort {
+				continue
+			}
+
+			if port.Protocol == ProtocolUDP {
+				protocol = 17
+			}
+
+			// check if already has udp matching to add wild card publishing
+			for j := i + 1; j < len(ingressPorts); j++ {
+				if ingressPorts[j].TargetPort == port.TargetPort &&
+					ingressPorts[j].PublishedPort == port.PublishedPort {
+					protocol = 0
+				}
+			}
+
+			publishedPorts[port.PublishedPort] = port.TargetPort
+
+			lbPolicylistMap[lb].elb, err = hcsshim.AddLoadBalancer(endpoints, false, sourceVIP, "", protocol, uint16(port.TargetPort), uint16(port.PublishedPort))
+			if err != nil {
+				logrus.Errorf("Failed to add ELB policy for service %s (ip:%s target port:%v published port:%v) with endpoints %v using load balancer IP %s on network %s: %v",
+					lb.service.name, vip.String(), uint16(port.TargetPort), uint16(port.PublishedPort), endpoints, sourceVIP, n.Name(), err)
+				return
+			}
+		}
+	}
+}
+
+func (n *network) rmLBBackend(ip, vip net.IP, lb *loadBalancer, ingressPorts []*PortConfig, rmService bool) {
+	if system.GetOSVersion().Build > 16236 {
+		if len(lb.backEnds) > 0 {
+			//Reprogram HNS (actually VFP) with the existing backends.
+			n.addLBBackend(ip, vip, lb, ingressPorts)
+		} else {
+			lb.Lock()
+			defer lb.Unlock()
+			logrus.Debugf("No more backends for service %s (ip:%s).  Removing all policies", lb.service.name, lb.vip.String())
+
+			if policyLists, ok := lbPolicylistMap[lb]; ok {
+				if policyLists.ilb != nil {
+					policyLists.ilb.Delete()
+					policyLists.ilb = nil
+				}
+
+				if policyLists.elb != nil {
+					policyLists.elb.Delete()
+					policyLists.elb = nil
+				}
+				delete(lbPolicylistMap, lb)
+
+			} else {
+				logrus.Errorf("Failed to find policies for service %s (%s)", lb.service.name, lb.vip.String())
+			}
+		}
+	}
 }
 
 func (sb *sandbox) populateLoadbalancers(ep *endpoint) {
diff --git a/vendor/github.com/docker/libnetwork/types/types.go b/vendor/github.com/docker/libnetwork/types/types.go
index da113e2..164b180 100644
--- a/vendor/github.com/docker/libnetwork/types/types.go
+++ b/vendor/github.com/docker/libnetwork/types/types.go
@@ -129,11 +129,11 @@
 func (p *PortBinding) String() string {
 	ret := fmt.Sprintf("%s/", p.Proto)
 	if p.IP != nil {
-		ret = fmt.Sprintf("%s%s", ret, p.IP.String())
+		ret += p.IP.String()
 	}
 	ret = fmt.Sprintf("%s:%d/", ret, p.Port)
 	if p.HostIP != nil {
-		ret = fmt.Sprintf("%s%s", ret, p.HostIP.String())
+		ret += p.HostIP.String()
 	}
 	ret = fmt.Sprintf("%s:%d", ret, p.HostPort)
 	return ret
diff --git a/vendor/github.com/docker/libnetwork/vendor.conf b/vendor/github.com/docker/libnetwork/vendor.conf
index 6751cba..c97f551 100644
--- a/vendor/github.com/docker/libnetwork/vendor.conf
+++ b/vendor/github.com/docker/libnetwork/vendor.conf
@@ -1,7 +1,7 @@
 github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
 github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060
 github.com/Microsoft/go-winio ce2922f643c8fd76b46cadc7f404a06282678b34
-github.com/Microsoft/hcsshim v0.6.1
+github.com/Microsoft/hcsshim v0.6.3
 github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec
 github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80
 github.com/boltdb/bolt c6ba97b89e0454fec9aa92e1d33a4e2c5fc1f631
diff --git a/vendor/github.com/docker/swarmkit/README.md b/vendor/github.com/docker/swarmkit/README.md
index 4900fe3..ffc744c 100644
--- a/vendor/github.com/docker/swarmkit/README.md
+++ b/vendor/github.com/docker/swarmkit/README.md
@@ -153,6 +153,12 @@
 $ swarmd -d /tmp/node-3 --hostname node-3 --join-addr 127.0.0.1:4242 --join-token <Worker Token>
 ```
 
+If joining as a manager, also specify the listen-control-api.
+
+```sh
+$ swarmd -d /tmp/node-4 --hostname node-4 --join-addr 127.0.0.1:4242 --join-token <Manager Token> --listen-control-api /tmp/node-4/swarm.sock --listen-remote-api 127.0.0.1:4245
+```
+
 In a fourth terminal, use `swarmctl` to explore and control the cluster. Before
 running `swarmctl`, set the `SWARM_SOCKET` environment variable to the path of the
 manager socket that was specified in `--listen-control-api` when starting the
@@ -168,6 +174,7 @@
 3x12fpoi36eujbdkgdnbvbi6r  node-2  ACCEPTED    READY   ACTIVE
 4spl3tyipofoa2iwqgabsdcve  node-1  ACCEPTED    READY   ACTIVE        REACHABLE *
 dknwk1uqxhnyyujq66ho0h54t  node-3  ACCEPTED    READY   ACTIVE
+zw3rwfawdasdewfq66ho34eaw  node-4  ACCEPTED    READY   ACTIVE        REACHABLE
 
 
 ```
diff --git a/vendor/github.com/docker/swarmkit/agent/exec/controller.go b/vendor/github.com/docker/swarmkit/agent/exec/controller.go
index 85110ba..9b4fc7b 100644
--- a/vendor/github.com/docker/swarmkit/agent/exec/controller.go
+++ b/vendor/github.com/docker/swarmkit/agent/exec/controller.go
@@ -119,6 +119,7 @@
 		// we always want to proceed to accepted when we resolve the controller
 		status.Message = "accepted"
 		status.State = api.TaskStateAccepted
+		status.Err = ""
 	}
 
 	return ctlr, status, err
@@ -158,6 +159,7 @@
 		current := status.State
 		status.State = state
 		status.Message = msg
+		status.Err = ""
 
 		if current > state {
 			panic("invalid state transition")
diff --git a/vendor/github.com/docker/swarmkit/api/genericresource/parse.go b/vendor/github.com/docker/swarmkit/api/genericresource/parse.go
index de30908..f39a707 100644
--- a/vendor/github.com/docker/swarmkit/api/genericresource/parse.go
+++ b/vendor/github.com/docker/swarmkit/api/genericresource/parse.go
@@ -1,6 +1,7 @@
 package genericresource
 
 import (
+	"encoding/csv"
 	"fmt"
 	"strconv"
 	"strings"
@@ -12,36 +13,99 @@
 	return fmt.Errorf("could not parse GenericResource: "+format, args...)
 }
 
-// Parse parses the GenericResource resources given by the arguments
-func Parse(cmd string) ([]*api.GenericResource, error) {
-	var rs []*api.GenericResource
+// discreteResourceVal returns an int64 if the string is a discreteResource
+// and an error if it isn't
+func discreteResourceVal(res string) (int64, error) {
+	return strconv.ParseInt(res, 10, 64)
+}
 
-	for _, term := range strings.Split(cmd, ";") {
+// allNamedResources returns true if the array of resources are all namedResources
+// e.g: res = [red, orange, green]
+func allNamedResources(res []string) bool {
+	for _, v := range res {
+		if _, err := discreteResourceVal(v); err == nil {
+			return false
+		}
+	}
+
+	return true
+}
+
+// ParseCmd parses the Generic Resource command line argument
+// and returns a list of *api.GenericResource
+func ParseCmd(cmd string) ([]*api.GenericResource, error) {
+	if strings.Contains(cmd, "\n") {
+		return nil, newParseError("unexpected '\\n' character")
+	}
+
+	r := csv.NewReader(strings.NewReader(cmd))
+	records, err := r.ReadAll()
+
+	if err != nil {
+		return nil, newParseError("%v", err)
+	}
+
+	if len(records) != 1 {
+		return nil, newParseError("found multiple records while parsing cmd %v", records)
+	}
+
+	return Parse(records[0])
+}
+
+// Parse parses a table of GenericResource resources
+func Parse(cmds []string) ([]*api.GenericResource, error) {
+	tokens := make(map[string][]string)
+
+	for _, term := range cmds {
 		kva := strings.Split(term, "=")
 		if len(kva) != 2 {
-			return nil, newParseError("incorrect term %s, missing '=' or malformed expr", term)
+			return nil, newParseError("incorrect term %s, missing"+
+				" '=' or malformed expression", term)
 		}
 
 		key := strings.TrimSpace(kva[0])
 		val := strings.TrimSpace(kva[1])
 
-		u, err := strconv.ParseInt(val, 10, 64)
-		if err == nil {
+		tokens[key] = append(tokens[key], val)
+	}
+
+	var rs []*api.GenericResource
+	for k, v := range tokens {
+		if u, ok := isDiscreteResource(v); ok {
 			if u < 0 {
-				return nil, newParseError("cannot ask for negative resource %s", key)
+				return nil, newParseError("cannot ask for"+
+					" negative resource %s", k)
 			}
-			rs = append(rs, NewDiscrete(key, u))
+
+			rs = append(rs, NewDiscrete(k, u))
 			continue
 		}
 
-		if len(val) > 2 && val[0] == '{' && val[len(val)-1] == '}' {
-			val = val[1 : len(val)-1]
-			rs = append(rs, NewSet(key, strings.Split(val, ",")...)...)
+		if allNamedResources(v) {
+			rs = append(rs, NewSet(k, v...)...)
 			continue
 		}
 
-		return nil, newParseError("could not parse expression '%s'", term)
+		return nil, newParseError("mixed discrete and named resources"+
+			" in expression '%s=%s'", k, v)
 	}
 
 	return rs, nil
 }
+
+// isDiscreteResource returns true if the array of resources is a
+// Discrete Resource.
+// e.g: res = [1]
+func isDiscreteResource(values []string) (int64, bool) {
+	if len(values) != 1 {
+		return int64(0), false
+	}
+
+	u, err := discreteResourceVal(values[0])
+	if err != nil {
+		return int64(0), false
+	}
+
+	return u, true
+
+}
diff --git a/vendor/github.com/docker/swarmkit/api/objects.pb.go b/vendor/github.com/docker/swarmkit/api/objects.pb.go
index d7a2f01..01abbe5 100644
--- a/vendor/github.com/docker/swarmkit/api/objects.pb.go
+++ b/vendor/github.com/docker/swarmkit/api/objects.pb.go
@@ -57,6 +57,7 @@
 	// ManagerStatus provides the current status of the node's manager
 	// component, if the node is a manager.
 	ManagerStatus *ManagerStatus `protobuf:"bytes,6,opt,name=manager_status,json=managerStatus" json:"manager_status,omitempty"`
+	// DEPRECATED: Use lb_attachments to find the ingress network
 	// The node attachment to the ingress network.
 	Attachment *NetworkAttachment `protobuf:"bytes,7,opt,name=attachment" json:"attachment,omitempty"`
 	// Certificate is the TLS certificate issued for the node, if any.
@@ -71,6 +72,10 @@
 	// shows the privilege level that the CA would currently grant when
 	// issuing or renewing the node's certificate.
 	Role NodeRole `protobuf:"varint,9,opt,name=role,proto3,enum=docker.swarmkit.v1.NodeRole" json:"role,omitempty"`
+	// Attachments enumerates the network attachments for the node to set up an
+	// endpoint on the node to be used for load balancing. Each overlay
+	// network, including ingress network, will have an NetworkAttachment.
+	Attachments []*NetworkAttachment `protobuf:"bytes,10,rep,name=attachments" json:"attachments,omitempty"`
 }
 
 func (m *Node) Reset()                    { *m = Node{} }
@@ -403,6 +408,14 @@
 		github_com_docker_swarmkit_api_deepcopy.Copy(m.Attachment, o.Attachment)
 	}
 	github_com_docker_swarmkit_api_deepcopy.Copy(&m.Certificate, &o.Certificate)
+	if o.Attachments != nil {
+		m.Attachments = make([]*NetworkAttachment, len(o.Attachments))
+		for i := range m.Attachments {
+			m.Attachments[i] = &NetworkAttachment{}
+			github_com_docker_swarmkit_api_deepcopy.Copy(m.Attachments[i], o.Attachments[i])
+		}
+	}
+
 }
 
 func (m *Service) Copy() *Service {
@@ -849,6 +862,18 @@
 		i++
 		i = encodeVarintObjects(dAtA, i, uint64(m.Role))
 	}
+	if len(m.Attachments) > 0 {
+		for _, msg := range m.Attachments {
+			dAtA[i] = 0x52
+			i++
+			i = encodeVarintObjects(dAtA, i, uint64(msg.Size()))
+			n, err := msg.MarshalTo(dAtA[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n
+		}
+	}
 	return i, nil
 }
 
@@ -1670,6 +1695,12 @@
 	if m.Role != 0 {
 		n += 1 + sovObjects(uint64(m.Role))
 	}
+	if len(m.Attachments) > 0 {
+		for _, e := range m.Attachments {
+			l = e.Size()
+			n += 1 + l + sovObjects(uint64(l))
+		}
+	}
 	return n
 }
 
@@ -4383,6 +4414,7 @@
 		`Attachment:` + strings.Replace(fmt.Sprintf("%v", this.Attachment), "NetworkAttachment", "NetworkAttachment", 1) + `,`,
 		`Certificate:` + strings.Replace(strings.Replace(this.Certificate.String(), "Certificate", "Certificate", 1), `&`, ``, 1) + `,`,
 		`Role:` + fmt.Sprintf("%v", this.Role) + `,`,
+		`Attachments:` + strings.Replace(fmt.Sprintf("%v", this.Attachments), "NetworkAttachment", "NetworkAttachment", 1) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -5017,6 +5049,37 @@
 					break
 				}
 			}
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Attachments", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowObjects
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthObjects
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Attachments = append(m.Attachments, &NetworkAttachment{})
+			if err := m.Attachments[len(m.Attachments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipObjects(dAtA[iNdEx:])
@@ -7689,100 +7752,101 @@
 func init() { proto.RegisterFile("github.com/docker/swarmkit/api/objects.proto", fileDescriptorObjects) }
 
 var fileDescriptorObjects = []byte{
-	// 1513 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4d, 0x6f, 0x1b, 0x4f,
-	0x19, 0xef, 0xda, 0x1b, 0xbf, 0x3c, 0x4e, 0x4c, 0x98, 0x7f, 0x08, 0x5b, 0x13, 0xec, 0xe0, 0x0a,
-	0x54, 0x55, 0x95, 0x53, 0x42, 0x81, 0x34, 0x50, 0x5a, 0x3b, 0x89, 0x5a, 0xab, 0x94, 0x46, 0xd3,
-	0xd2, 0x72, 0x5b, 0x26, 0xbb, 0x53, 0x77, 0xf1, 0x7a, 0x67, 0xb5, 0x33, 0x76, 0xf1, 0x8d, 0x73,
-	0xf8, 0x00, 0xb9, 0x71, 0xe8, 0x57, 0x80, 0x0b, 0x17, 0x0e, 0x9c, 0x7a, 0xe4, 0x84, 0x38, 0x45,
-	0xd4, 0xdf, 0x02, 0x89, 0x03, 0x9a, 0xd9, 0x59, 0x7b, 0x13, 0xaf, 0x93, 0x14, 0x55, 0xd1, 0xff,
-	0x94, 0x99, 0x9d, 0xdf, 0xef, 0x79, 0x9b, 0xe7, 0x65, 0x62, 0xb8, 0xdb, 0xf3, 0xc4, 0xbb, 0xe1,
-	0x51, 0xcb, 0x61, 0x83, 0x2d, 0x97, 0x39, 0x7d, 0x1a, 0x6d, 0xf1, 0xf7, 0x24, 0x1a, 0xf4, 0x3d,
-	0xb1, 0x45, 0x42, 0x6f, 0x8b, 0x1d, 0xfd, 0x8e, 0x3a, 0x82, 0xb7, 0xc2, 0x88, 0x09, 0x86, 0x50,
-	0x0c, 0x69, 0x25, 0x90, 0xd6, 0xe8, 0x87, 0xb5, 0x3b, 0x97, 0x48, 0x10, 0xe3, 0x90, 0x6a, 0xfe,
-	0xa5, 0x58, 0x1e, 0x52, 0x27, 0xc1, 0x36, 0x7a, 0x8c, 0xf5, 0x7c, 0xba, 0xa5, 0x76, 0x47, 0xc3,
-	0xb7, 0x5b, 0xc2, 0x1b, 0x50, 0x2e, 0xc8, 0x20, 0xd4, 0x80, 0xb5, 0x1e, 0xeb, 0x31, 0xb5, 0xdc,
-	0x92, 0x2b, 0xfd, 0xf5, 0xe6, 0x79, 0x1a, 0x09, 0xc6, 0xfa, 0xe8, 0xa7, 0x17, 0x68, 0x9f, 0xc2,
-	0x43, 0x7f, 0xd8, 0xf3, 0x02, 0xfd, 0x27, 0x26, 0x36, 0xff, 0x6a, 0x80, 0xf9, 0x9c, 0x0a, 0x82,
-	0x7e, 0x06, 0xc5, 0x11, 0x8d, 0xb8, 0xc7, 0x02, 0xcb, 0xd8, 0x34, 0x6e, 0x57, 0xb6, 0xbf, 0xd3,
-	0x9a, 0x8f, 0x48, 0xeb, 0x75, 0x0c, 0xe9, 0x98, 0x1f, 0x4f, 0x1b, 0x37, 0x70, 0xc2, 0x40, 0x0f,
-	0x00, 0x9c, 0x88, 0x12, 0x41, 0x5d, 0x9b, 0x08, 0x2b, 0xa7, 0xf8, 0xb5, 0x56, 0x6c, 0x6e, 0x2b,
-	0xd1, 0xdf, 0x7a, 0x95, 0x78, 0x89, 0xcb, 0x1a, 0xdd, 0x16, 0x92, 0x3a, 0x0c, 0xdd, 0x84, 0x9a,
-	0xbf, 0x9c, 0xaa, 0xd1, 0x6d, 0xd1, 0xfc, 0xb3, 0x09, 0xe6, 0xaf, 0x98, 0x4b, 0xd1, 0x3a, 0xe4,
-	0x3c, 0x57, 0x99, 0x5d, 0xee, 0x14, 0x26, 0xa7, 0x8d, 0x5c, 0x77, 0x1f, 0xe7, 0x3c, 0x17, 0x6d,
-	0x83, 0x39, 0xa0, 0x82, 0x68, 0x83, 0xac, 0x2c, 0x87, 0xa4, 0xef, 0xda, 0x1b, 0x85, 0x45, 0x3f,
-	0x01, 0x53, 0x5e, 0x95, 0xb6, 0x64, 0x23, 0x8b, 0x23, 0x75, 0xbe, 0x0c, 0xa9, 0x93, 0xf0, 0x24,
-	0x1e, 0x1d, 0x40, 0xc5, 0xa5, 0xdc, 0x89, 0xbc, 0x50, 0xc8, 0x18, 0x9a, 0x8a, 0x7e, 0x6b, 0x11,
-	0x7d, 0x7f, 0x06, 0xc5, 0x69, 0x1e, 0xfa, 0x39, 0x14, 0xb8, 0x20, 0x62, 0xc8, 0xad, 0x25, 0x25,
-	0xa1, 0xbe, 0xd0, 0x00, 0x85, 0xd2, 0x26, 0x68, 0x0e, 0x7a, 0x0a, 0xd5, 0x01, 0x09, 0x48, 0x8f,
-	0x46, 0xb6, 0x96, 0x52, 0x50, 0x52, 0xbe, 0x97, 0xe9, 0x7a, 0x8c, 0x8c, 0x05, 0xe1, 0x95, 0x41,
-	0x7a, 0x8b, 0x0e, 0x00, 0x88, 0x10, 0xc4, 0x79, 0x37, 0xa0, 0x81, 0xb0, 0x8a, 0x4a, 0xca, 0xf7,
-	0x33, 0x6d, 0xa1, 0xe2, 0x3d, 0x8b, 0xfa, 0xed, 0x29, 0x18, 0xa7, 0x88, 0xe8, 0x09, 0x54, 0x1c,
-	0x1a, 0x09, 0xef, 0xad, 0xe7, 0x10, 0x41, 0xad, 0x92, 0x92, 0xd3, 0xc8, 0x92, 0xb3, 0x37, 0x83,
-	0x69, 0xa7, 0xd2, 0x4c, 0x74, 0x0f, 0xcc, 0x88, 0xf9, 0xd4, 0x2a, 0x6f, 0x1a, 0xb7, 0xab, 0x8b,
-	0xaf, 0x05, 0x33, 0x9f, 0x62, 0x85, 0xdc, 0x5d, 0x3f, 0x3e, 0x69, 0x22, 0x58, 0x2d, 0x19, 0xab,
-	0x86, 0x4a, 0x0d, 0xe3, 0x9e, 0xf1, 0x1b, 0xe3, 0xb7, 0x46, 0xf3, 0xbf, 0x79, 0x28, 0xbe, 0xa4,
-	0xd1, 0xc8, 0x73, 0xbe, 0x6c, 0xe2, 0x3c, 0x38, 0x93, 0x38, 0x99, 0x3e, 0x6a, 0xb5, 0x73, 0xb9,
-	0xb3, 0x03, 0x25, 0x1a, 0xb8, 0x21, 0xf3, 0x02, 0xa1, 0x13, 0x27, 0xd3, 0xc1, 0x03, 0x8d, 0xc1,
-	0x53, 0x34, 0x3a, 0x80, 0x95, 0xb8, 0x1e, 0xec, 0x33, 0x59, 0xb3, 0x99, 0x45, 0xff, 0xb5, 0x02,
-	0xea, 0xeb, 0x5e, 0x1e, 0xa6, 0x76, 0x68, 0x1f, 0x56, 0xc2, 0x88, 0x8e, 0x3c, 0x36, 0xe4, 0xb6,
-	0x72, 0xa2, 0x70, 0x25, 0x27, 0xf0, 0x72, 0xc2, 0x92, 0x3b, 0xf4, 0x0b, 0x58, 0x96, 0x64, 0x3b,
-	0xe9, 0x23, 0x70, 0x69, 0x1f, 0xc1, 0x15, 0x49, 0xd0, 0x1b, 0xf4, 0x02, 0xbe, 0x75, 0xc6, 0x8a,
-	0xa9, 0xa0, 0xca, 0xe5, 0x82, 0xbe, 0x4a, 0x5b, 0xa2, 0x3f, 0xee, 0xa2, 0xe3, 0x93, 0x66, 0x15,
-	0x96, 0xd3, 0x29, 0xd0, 0xfc, 0x53, 0x0e, 0x4a, 0x49, 0x20, 0xd1, 0x7d, 0x7d, 0x67, 0xc6, 0xe2,
-	0xa8, 0x25, 0x58, 0xe5, 0x6f, 0x7c, 0x5d, 0xf7, 0x61, 0x29, 0x64, 0x91, 0xe0, 0x56, 0x6e, 0x33,
-	0xbf, 0xa8, 0x44, 0x0f, 0x59, 0x24, 0xf6, 0x58, 0xf0, 0xd6, 0xeb, 0xe1, 0x18, 0x8c, 0xde, 0x40,
-	0x65, 0xe4, 0x45, 0x62, 0x48, 0x7c, 0xdb, 0x0b, 0xb9, 0x95, 0x57, 0xdc, 0x1f, 0x5c, 0xa4, 0xb2,
-	0xf5, 0x3a, 0xc6, 0x77, 0x0f, 0x3b, 0xd5, 0xc9, 0x69, 0x03, 0xa6, 0x5b, 0x8e, 0x41, 0x8b, 0xea,
-	0x86, 0xbc, 0xf6, 0x1c, 0xca, 0xd3, 0x13, 0x74, 0x17, 0x20, 0x88, 0x2b, 0xd2, 0x9e, 0x66, 0xf6,
-	0xca, 0xe4, 0xb4, 0x51, 0xd6, 0x75, 0xda, 0xdd, 0xc7, 0x65, 0x0d, 0xe8, 0xba, 0x08, 0x81, 0x49,
-	0x5c, 0x37, 0x52, 0x79, 0x5e, 0xc6, 0x6a, 0xdd, 0xfc, 0x63, 0x11, 0xcc, 0x57, 0x84, 0xf7, 0xaf,
-	0xbb, 0xab, 0x4a, 0x9d, 0x73, 0x95, 0x71, 0x17, 0x80, 0xc7, 0xf9, 0x26, 0xdd, 0x31, 0x67, 0xee,
-	0xe8, 0x2c, 0x94, 0xee, 0x68, 0x40, 0xec, 0x0e, 0xf7, 0x99, 0x50, 0x45, 0x60, 0x62, 0xb5, 0x46,
-	0xb7, 0xa0, 0x18, 0x30, 0x57, 0xd1, 0x0b, 0x8a, 0x0e, 0x93, 0xd3, 0x46, 0x41, 0xf6, 0x8a, 0xee,
-	0x3e, 0x2e, 0xc8, 0xa3, 0xae, 0x2b, 0xdb, 0x14, 0x09, 0x02, 0x26, 0x88, 0xec, 0xc1, 0x5c, 0xb7,
-	0xbb, 0xcc, 0xec, 0x6f, 0xcf, 0x60, 0x49, 0x9b, 0x4a, 0x31, 0xd1, 0x6b, 0xf8, 0x2a, 0xb1, 0x37,
-	0x2d, 0xb0, 0xf4, 0x39, 0x02, 0x91, 0x96, 0x90, 0x3a, 0x49, 0x8d, 0x85, 0xf2, 0xe2, 0xb1, 0xa0,
-	0x22, 0x98, 0x35, 0x16, 0x3a, 0xb0, 0xe2, 0x52, 0xee, 0x45, 0xd4, 0x55, 0x6d, 0x82, 0xaa, 0xca,
-	0xac, 0x6e, 0x7f, 0xf7, 0x22, 0x21, 0x14, 0x2f, 0x6b, 0x8e, 0xda, 0xa1, 0x36, 0x94, 0x74, 0xde,
-	0x70, 0xab, 0xa2, 0x72, 0xf7, 0x8a, 0xe3, 0x60, 0x4a, 0x3b, 0xd3, 0xe6, 0x96, 0x3f, 0xab, 0xcd,
-	0x3d, 0x00, 0xf0, 0x59, 0xcf, 0x76, 0x23, 0x6f, 0x44, 0x23, 0x6b, 0x45, 0x3f, 0x12, 0x32, 0xb8,
-	0xfb, 0x0a, 0x81, 0xcb, 0x3e, 0xeb, 0xc5, 0xcb, 0xb9, 0xa6, 0x54, 0xfd, 0xcc, 0xa6, 0x44, 0xa0,
-	0x46, 0x38, 0xf7, 0x7a, 0x01, 0x75, 0xed, 0x1e, 0x0d, 0x68, 0xe4, 0x39, 0x76, 0x44, 0x39, 0x1b,
-	0x46, 0x0e, 0xe5, 0xd6, 0x37, 0x54, 0x24, 0x32, 0xc7, 0xfc, 0x93, 0x18, 0x8c, 0x35, 0x16, 0x5b,
-	0x89, 0x98, 0x73, 0x07, 0x7c, 0xb7, 0x76, 0x7c, 0xd2, 0x5c, 0x87, 0xb5, 0x74, 0x9b, 0xda, 0x31,
-	0x1e, 0x1b, 0x4f, 0x8d, 0x43, 0xa3, 0xf9, 0xf7, 0x1c, 0x7c, 0x73, 0x2e, 0xa6, 0xe8, 0xc7, 0x50,
-	0xd4, 0x51, 0xbd, 0xe8, 0xb1, 0xa6, 0x79, 0x38, 0xc1, 0xa2, 0x0d, 0x28, 0xcb, 0x12, 0xa7, 0x9c,
-	0xd3, 0xb8, 0x79, 0x95, 0xf1, 0xec, 0x03, 0xb2, 0xa0, 0x48, 0x7c, 0x8f, 0xc8, 0xb3, 0xbc, 0x3a,
-	0x4b, 0xb6, 0x68, 0x08, 0xeb, 0x71, 0xe8, 0xed, 0xd9, 0x68, 0xb7, 0x59, 0x28, 0xb8, 0x65, 0x2a,
-	0xff, 0x1f, 0x5d, 0x29, 0x13, 0xf4, 0xe5, 0xcc, 0x3e, 0xbc, 0x08, 0x05, 0x3f, 0x08, 0x44, 0x34,
-	0xc6, 0x6b, 0x6e, 0xc6, 0x51, 0xed, 0x09, 0xdc, 0x5c, 0x48, 0x41, 0xab, 0x90, 0xef, 0xd3, 0x71,
-	0xdc, 0x9e, 0xb0, 0x5c, 0xa2, 0x35, 0x58, 0x1a, 0x11, 0x7f, 0x48, 0x75, 0x37, 0x8b, 0x37, 0xbb,
-	0xb9, 0x1d, 0xa3, 0xf9, 0x21, 0x07, 0x45, 0x6d, 0xce, 0x75, 0x8f, 0x7c, 0xad, 0x76, 0xae, 0xb1,
-	0x3d, 0x84, 0x65, 0x1d, 0xd2, 0xb8, 0x22, 0xcd, 0x4b, 0x73, 0xba, 0x12, 0xe3, 0xe3, 0x6a, 0x7c,
-	0x08, 0xa6, 0x17, 0x92, 0x81, 0x1e, 0xf7, 0x99, 0x9a, 0xbb, 0x87, 0xed, 0xe7, 0x2f, 0xc2, 0xb8,
-	0xb1, 0x94, 0x26, 0xa7, 0x0d, 0x53, 0x7e, 0xc0, 0x8a, 0x96, 0x39, 0x18, 0xff, 0xb2, 0x04, 0xc5,
-	0x3d, 0x7f, 0xc8, 0x05, 0x8d, 0xae, 0x3b, 0x48, 0x5a, 0xed, 0x5c, 0x90, 0xf6, 0xa0, 0x18, 0x31,
-	0x26, 0x6c, 0x87, 0x5c, 0x14, 0x1f, 0xcc, 0x98, 0xd8, 0x6b, 0x77, 0xaa, 0x92, 0x28, 0x7b, 0x7b,
-	0xbc, 0xc7, 0x05, 0x49, 0xdd, 0x23, 0xe8, 0x0d, 0xac, 0x27, 0x13, 0xf1, 0x88, 0x31, 0xc1, 0x45,
-	0x44, 0x42, 0xbb, 0x4f, 0xc7, 0xf2, 0xad, 0x94, 0x5f, 0xf4, 0x36, 0x3e, 0x08, 0x9c, 0x68, 0xac,
-	0x82, 0xf7, 0x8c, 0x8e, 0xf1, 0x9a, 0x16, 0xd0, 0x49, 0xf8, 0xcf, 0xe8, 0x98, 0xa3, 0x47, 0xb0,
-	0x41, 0xa7, 0x30, 0x29, 0xd1, 0xf6, 0xc9, 0x40, 0xce, 0x7a, 0xdb, 0xf1, 0x99, 0xd3, 0x57, 0xe3,
-	0xc6, 0xc4, 0x37, 0x69, 0x5a, 0xd4, 0x2f, 0x63, 0xc4, 0x9e, 0x04, 0x20, 0x0e, 0xd6, 0x91, 0x4f,
-	0x9c, 0xbe, 0xef, 0x71, 0xf9, 0xef, 0x4f, 0xea, 0xb9, 0x2b, 0x27, 0x86, 0xb4, 0x6d, 0xe7, 0x82,
-	0x68, 0xb5, 0x3a, 0x33, 0x6e, 0xea, 0xf1, 0xac, 0x2b, 0xea, 0xdb, 0x47, 0xd9, 0xa7, 0xa8, 0x03,
-	0x95, 0x61, 0x20, 0xd5, 0xc7, 0x31, 0x28, 0x5f, 0x35, 0x06, 0x10, 0xb3, 0xa4, 0xe7, 0xb5, 0x11,
-	0x6c, 0x5c, 0xa4, 0x3c, 0xa3, 0x36, 0x1f, 0xa7, 0x6b, 0xb3, 0xb2, 0x7d, 0x27, 0x4b, 0x5f, 0xb6,
-	0xc8, 0x54, 0x1d, 0x67, 0xa6, 0xed, 0xdf, 0x0c, 0x28, 0xbc, 0xa4, 0x4e, 0x44, 0xc5, 0x17, 0xcd,
-	0xda, 0x9d, 0x33, 0x59, 0x5b, 0xcf, 0x7e, 0x08, 0x4b, 0xad, 0x73, 0x49, 0x5b, 0x83, 0x92, 0x17,
-	0x08, 0x1a, 0x05, 0xc4, 0x57, 0x59, 0x5b, 0xc2, 0xd3, 0x7d, 0xa6, 0x03, 0x1f, 0x0c, 0x28, 0xc4,
-	0x2f, 0xc5, 0xeb, 0x76, 0x20, 0xd6, 0x7a, 0xde, 0x81, 0x4c, 0x23, 0xff, 0x63, 0x40, 0x29, 0x19,
-	0x58, 0x5f, 0xd4, 0xcc, 0x73, 0x2f, 0xaf, 0xfc, 0xff, 0xfd, 0xf2, 0x42, 0x60, 0xf6, 0xbd, 0x40,
-	0xbf, 0x11, 0xb1, 0x5a, 0xa3, 0x16, 0x14, 0x43, 0x32, 0xf6, 0x19, 0x71, 0x75, 0xa3, 0x5c, 0x9b,
-	0xfb, 0x61, 0xa1, 0x1d, 0x8c, 0x71, 0x02, 0xda, 0x5d, 0x3b, 0x3e, 0x69, 0xae, 0x42, 0x35, 0xed,
-	0xf9, 0x3b, 0xa3, 0xf9, 0x4f, 0x03, 0xca, 0x07, 0xbf, 0x17, 0x34, 0x50, 0xef, 0x81, 0xaf, 0xa5,
-	0xf3, 0x9b, 0xf3, 0x3f, 0x3e, 0x94, 0xcf, 0xfc, 0xae, 0x90, 0x75, 0xa9, 0x1d, 0xeb, 0xe3, 0xa7,
-	0xfa, 0x8d, 0x7f, 0x7d, 0xaa, 0xdf, 0xf8, 0xc3, 0xa4, 0x6e, 0x7c, 0x9c, 0xd4, 0x8d, 0x7f, 0x4c,
-	0xea, 0xc6, 0xbf, 0x27, 0x75, 0xe3, 0xa8, 0xa0, 0xe2, 0xf3, 0xa3, 0xff, 0x05, 0x00, 0x00, 0xff,
-	0xff, 0x34, 0x0b, 0x7d, 0x79, 0x43, 0x13, 0x00, 0x00,
+	// 1527 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0xcf, 0x6f, 0x1b, 0x4f,
+	0x15, 0xef, 0xda, 0x1b, 0xff, 0x78, 0x4e, 0x4c, 0x98, 0x86, 0xb0, 0x35, 0xc1, 0x0e, 0xae, 0x40,
+	0x55, 0x55, 0x39, 0x25, 0x14, 0x48, 0x03, 0xa5, 0xb5, 0x93, 0xa8, 0xb5, 0x4a, 0x69, 0x34, 0x2d,
+	0x2d, 0xb7, 0x65, 0xb2, 0x3b, 0x75, 0x17, 0xaf, 0x77, 0x56, 0x3b, 0x63, 0x17, 0xdf, 0x7a, 0x0e,
+	0x7f, 0x40, 0x6e, 0x1c, 0xfa, 0x37, 0x70, 0xe1, 0xc2, 0x81, 0x53, 0x8f, 0x9c, 0x10, 0xa7, 0x88,
+	0xfa, 0xbf, 0x40, 0xe2, 0x80, 0x66, 0x76, 0xd6, 0xde, 0xc4, 0x9b, 0x5f, 0xa8, 0x8a, 0xbe, 0xa7,
+	0xcc, 0xec, 0x7c, 0x3e, 0xef, 0xd7, 0xbc, 0xf7, 0xe6, 0xc5, 0x70, 0xaf, 0xe7, 0x89, 0xf7, 0xc3,
+	0x83, 0x96, 0xc3, 0x06, 0x1b, 0x2e, 0x73, 0xfa, 0x34, 0xda, 0xe0, 0x1f, 0x48, 0x34, 0xe8, 0x7b,
+	0x62, 0x83, 0x84, 0xde, 0x06, 0x3b, 0xf8, 0x03, 0x75, 0x04, 0x6f, 0x85, 0x11, 0x13, 0x0c, 0xa1,
+	0x18, 0xd2, 0x4a, 0x20, 0xad, 0xd1, 0x8f, 0x6b, 0x77, 0x2f, 0x90, 0x20, 0xc6, 0x21, 0xd5, 0xfc,
+	0x0b, 0xb1, 0x3c, 0xa4, 0x4e, 0x82, 0x6d, 0xf4, 0x18, 0xeb, 0xf9, 0x74, 0x43, 0xed, 0x0e, 0x86,
+	0xef, 0x36, 0x84, 0x37, 0xa0, 0x5c, 0x90, 0x41, 0xa8, 0x01, 0x2b, 0x3d, 0xd6, 0x63, 0x6a, 0xb9,
+	0x21, 0x57, 0xfa, 0xeb, 0xad, 0xd3, 0x34, 0x12, 0x8c, 0xf5, 0xd1, 0xcf, 0xcf, 0xd1, 0x3e, 0x85,
+	0x87, 0xfe, 0xb0, 0xe7, 0x05, 0xfa, 0x4f, 0x4c, 0x6c, 0xfe, 0xd5, 0x00, 0xf3, 0x05, 0x15, 0x04,
+	0xfd, 0x02, 0x8a, 0x23, 0x1a, 0x71, 0x8f, 0x05, 0x96, 0xb1, 0x6e, 0xdc, 0xa9, 0x6c, 0x7e, 0xaf,
+	0x35, 0x1f, 0x91, 0xd6, 0x9b, 0x18, 0xd2, 0x31, 0x3f, 0x1f, 0x37, 0x6e, 0xe0, 0x84, 0x81, 0x1e,
+	0x02, 0x38, 0x11, 0x25, 0x82, 0xba, 0x36, 0x11, 0x56, 0x4e, 0xf1, 0x6b, 0xad, 0xd8, 0xdc, 0x56,
+	0xa2, 0xbf, 0xf5, 0x3a, 0xf1, 0x12, 0x97, 0x35, 0xba, 0x2d, 0x24, 0x75, 0x18, 0xba, 0x09, 0x35,
+	0x7f, 0x31, 0x55, 0xa3, 0xdb, 0xa2, 0xf9, 0x71, 0x01, 0xcc, 0xdf, 0x30, 0x97, 0xa2, 0x55, 0xc8,
+	0x79, 0xae, 0x32, 0xbb, 0xdc, 0x29, 0x4c, 0x8e, 0x1b, 0xb9, 0xee, 0x2e, 0xce, 0x79, 0x2e, 0xda,
+	0x04, 0x73, 0x40, 0x05, 0xd1, 0x06, 0x59, 0x59, 0x0e, 0x49, 0xdf, 0xb5, 0x37, 0x0a, 0x8b, 0x7e,
+	0x06, 0xa6, 0xbc, 0x2a, 0x6d, 0xc9, 0x5a, 0x16, 0x47, 0xea, 0x7c, 0x15, 0x52, 0x27, 0xe1, 0x49,
+	0x3c, 0xda, 0x83, 0x8a, 0x4b, 0xb9, 0x13, 0x79, 0xa1, 0x90, 0x31, 0x34, 0x15, 0xfd, 0xf6, 0x59,
+	0xf4, 0xdd, 0x19, 0x14, 0xa7, 0x79, 0xe8, 0x97, 0x50, 0xe0, 0x82, 0x88, 0x21, 0xb7, 0x16, 0x94,
+	0x84, 0xfa, 0x99, 0x06, 0x28, 0x94, 0x36, 0x41, 0x73, 0xd0, 0x33, 0xa8, 0x0e, 0x48, 0x40, 0x7a,
+	0x34, 0xb2, 0xb5, 0x94, 0x82, 0x92, 0xf2, 0x83, 0x4c, 0xd7, 0x63, 0x64, 0x2c, 0x08, 0x2f, 0x0d,
+	0xd2, 0x5b, 0xd4, 0x05, 0x20, 0x42, 0x10, 0xe7, 0xfd, 0x80, 0x06, 0xc2, 0x2a, 0x2a, 0x29, 0x3f,
+	0xcc, 0xb4, 0x85, 0x8a, 0x0f, 0x2c, 0xea, 0xb7, 0xa7, 0xe0, 0x4e, 0xce, 0x32, 0x70, 0x8a, 0x8c,
+	0x9e, 0x42, 0xc5, 0xa1, 0x91, 0xf0, 0xde, 0x79, 0x0e, 0x11, 0xd4, 0x2a, 0x29, 0x59, 0x8d, 0x2c,
+	0x59, 0x3b, 0x33, 0x98, 0x76, 0x2c, 0xcd, 0x44, 0xf7, 0xc1, 0x8c, 0x98, 0x4f, 0xad, 0xf2, 0xba,
+	0x71, 0xa7, 0x7a, 0xf6, 0xd5, 0x60, 0xe6, 0x53, 0xac, 0x90, 0x52, 0xf5, 0xcc, 0x10, 0x6e, 0xc1,
+	0x7a, 0xfe, 0xd2, 0x6e, 0xe0, 0x34, 0x73, 0x7b, 0xf5, 0xf0, 0xa8, 0x89, 0x60, 0xb9, 0x64, 0x2c,
+	0x1b, 0x2a, 0xcf, 0x8c, 0xfb, 0xc6, 0xef, 0x8c, 0xdf, 0x1b, 0xcd, 0xff, 0xe6, 0xa1, 0xf8, 0x8a,
+	0x46, 0x23, 0xcf, 0xf9, 0xba, 0x59, 0xf8, 0xf0, 0x44, 0x16, 0x66, 0x06, 0x4b, 0xab, 0x9d, 0x4b,
+	0xc4, 0x2d, 0x28, 0xd1, 0xc0, 0x0d, 0x99, 0x17, 0x08, 0x9d, 0x85, 0x99, 0x91, 0xda, 0xd3, 0x18,
+	0x3c, 0x45, 0xa3, 0x3d, 0x58, 0x8a, 0x8b, 0xcb, 0x3e, 0x91, 0x82, 0xeb, 0x59, 0xf4, 0xdf, 0x2a,
+	0xa0, 0xce, 0x9d, 0xc5, 0x61, 0x6a, 0x87, 0x76, 0x61, 0x29, 0x8c, 0xe8, 0xc8, 0x63, 0x43, 0x6e,
+	0x2b, 0x27, 0x0a, 0x97, 0x72, 0x02, 0x2f, 0x26, 0x2c, 0xb9, 0x43, 0xbf, 0x82, 0x45, 0x49, 0xb6,
+	0x93, 0xa6, 0x04, 0x17, 0x36, 0x25, 0x5c, 0x91, 0x04, 0xbd, 0x41, 0x2f, 0xe1, 0x3b, 0x27, 0xac,
+	0x98, 0x0a, 0xaa, 0x5c, 0x2c, 0xe8, 0x66, 0xda, 0x12, 0xfd, 0x71, 0x1b, 0x1d, 0x1e, 0x35, 0xab,
+	0xb0, 0x98, 0x4e, 0x81, 0xe6, 0x9f, 0x73, 0x50, 0x4a, 0x02, 0x89, 0x1e, 0xe8, 0x3b, 0x33, 0xce,
+	0x8e, 0x5a, 0x82, 0x55, 0xfe, 0xc6, 0xd7, 0xf5, 0x00, 0x16, 0x42, 0x16, 0x09, 0x6e, 0xe5, 0x54,
+	0x72, 0x66, 0xd6, 0xfb, 0x3e, 0x8b, 0xc4, 0x0e, 0x0b, 0xde, 0x79, 0x3d, 0x1c, 0x83, 0xd1, 0x5b,
+	0xa8, 0x8c, 0xbc, 0x48, 0x0c, 0x89, 0x6f, 0x7b, 0x21, 0xb7, 0xf2, 0x8a, 0xfb, 0xa3, 0xf3, 0x54,
+	0xb6, 0xde, 0xc4, 0xf8, 0xee, 0x7e, 0xa7, 0x3a, 0x39, 0x6e, 0xc0, 0x74, 0xcb, 0x31, 0x68, 0x51,
+	0xdd, 0x90, 0xd7, 0x5e, 0x40, 0x79, 0x7a, 0x82, 0xee, 0x01, 0x04, 0x71, 0x5d, 0xd8, 0xd3, 0xcc,
+	0x5e, 0x9a, 0x1c, 0x37, 0xca, 0xba, 0x5a, 0xba, 0xbb, 0xb8, 0xac, 0x01, 0x5d, 0x17, 0x21, 0x30,
+	0x89, 0xeb, 0x46, 0x2a, 0xcf, 0xcb, 0x58, 0xad, 0x9b, 0x7f, 0x2a, 0x82, 0xf9, 0x9a, 0xf0, 0xfe,
+	0x75, 0xb7, 0x68, 0xa9, 0x73, 0xae, 0x32, 0xee, 0x01, 0xf0, 0x38, 0xdf, 0xa4, 0x3b, 0xe6, 0xcc,
+	0x1d, 0x9d, 0x85, 0xd2, 0x1d, 0x0d, 0x88, 0xdd, 0xe1, 0x3e, 0x13, 0xaa, 0x08, 0x4c, 0xac, 0xd6,
+	0xe8, 0x36, 0x14, 0x03, 0xe6, 0x2a, 0x7a, 0x41, 0xd1, 0x61, 0x72, 0xdc, 0x28, 0xc8, 0xa6, 0xd3,
+	0xdd, 0xc5, 0x05, 0x79, 0xd4, 0x75, 0x55, 0xd3, 0x09, 0x02, 0x26, 0x88, 0x6c, 0xe8, 0x5c, 0xf7,
+	0xce, 0xcc, 0xec, 0x6f, 0xcf, 0x60, 0x49, 0xbf, 0x4b, 0x31, 0xd1, 0x1b, 0xb8, 0x99, 0xd8, 0x9b,
+	0x16, 0x58, 0xba, 0x8a, 0x40, 0xa4, 0x25, 0xa4, 0x4e, 0x52, 0x6f, 0x4c, 0xf9, 0xec, 0x37, 0x46,
+	0x45, 0x30, 0xeb, 0x8d, 0xe9, 0xc0, 0x92, 0x4b, 0xb9, 0x17, 0x51, 0x57, 0xb5, 0x09, 0xaa, 0x2a,
+	0xb3, 0xba, 0xf9, 0xfd, 0xf3, 0x84, 0x50, 0xbc, 0xa8, 0x39, 0x6a, 0x87, 0xda, 0x50, 0xd2, 0x79,
+	0xc3, 0xad, 0xca, 0x55, 0x9a, 0xf2, 0x94, 0x76, 0xa2, 0xcd, 0x2d, 0x5e, 0xa9, 0xcd, 0x3d, 0x04,
+	0xf0, 0x59, 0xcf, 0x76, 0x23, 0x6f, 0x44, 0x23, 0x6b, 0x49, 0x4f, 0x1c, 0x19, 0xdc, 0x5d, 0x85,
+	0xc0, 0x65, 0x9f, 0xf5, 0xe2, 0xe5, 0x5c, 0x53, 0xaa, 0x5e, 0xb1, 0x29, 0x11, 0xa8, 0x11, 0xce,
+	0xbd, 0x5e, 0x40, 0x5d, 0xbb, 0x47, 0x03, 0x1a, 0x79, 0x8e, 0x1d, 0x51, 0xce, 0x86, 0x91, 0x43,
+	0xb9, 0xf5, 0x2d, 0x15, 0x89, 0xcc, 0x99, 0xe1, 0x69, 0x0c, 0xc6, 0x1a, 0x8b, 0xad, 0x44, 0xcc,
+	0xa9, 0x03, 0xbe, 0x5d, 0x3b, 0x3c, 0x6a, 0xae, 0xc2, 0x4a, 0xba, 0x4d, 0x6d, 0x19, 0x4f, 0x8c,
+	0x67, 0xc6, 0xbe, 0xd1, 0xfc, 0x7b, 0x0e, 0xbe, 0x3d, 0x17, 0x53, 0xf4, 0x53, 0x28, 0xea, 0xa8,
+	0x9e, 0x37, 0xf9, 0x69, 0x1e, 0x4e, 0xb0, 0x68, 0x0d, 0xca, 0xb2, 0xc4, 0x29, 0xe7, 0x34, 0x6e,
+	0x5e, 0x65, 0x3c, 0xfb, 0x80, 0x2c, 0x28, 0x12, 0xdf, 0x23, 0xf2, 0x2c, 0xaf, 0xce, 0x92, 0x2d,
+	0x1a, 0xc2, 0x6a, 0x1c, 0x7a, 0x7b, 0xf6, 0xc0, 0xda, 0x2c, 0x14, 0xdc, 0x32, 0x95, 0xff, 0x8f,
+	0x2f, 0x95, 0x09, 0xfa, 0x72, 0x66, 0x1f, 0x5e, 0x86, 0x82, 0xef, 0x05, 0x22, 0x1a, 0xe3, 0x15,
+	0x37, 0xe3, 0xa8, 0xf6, 0x14, 0x6e, 0x9d, 0x49, 0x41, 0xcb, 0x90, 0xef, 0xd3, 0x71, 0xdc, 0x9e,
+	0xb0, 0x5c, 0xa2, 0x15, 0x58, 0x18, 0x11, 0x7f, 0x48, 0x75, 0x37, 0x8b, 0x37, 0xdb, 0xb9, 0x2d,
+	0xa3, 0xf9, 0x29, 0x07, 0x45, 0x6d, 0xce, 0x75, 0x3f, 0xf9, 0x5a, 0xed, 0x5c, 0x63, 0x7b, 0x04,
+	0x8b, 0x3a, 0xa4, 0x71, 0x45, 0x9a, 0x17, 0xe6, 0x74, 0x25, 0xc6, 0xc7, 0xd5, 0xf8, 0x08, 0x4c,
+	0x2f, 0x24, 0x03, 0xfd, 0xdc, 0x67, 0x6a, 0xee, 0xee, 0xb7, 0x5f, 0xbc, 0x0c, 0xe3, 0xc6, 0x52,
+	0x9a, 0x1c, 0x37, 0x4c, 0xf9, 0x01, 0x2b, 0x5a, 0xe6, 0xc3, 0xf8, 0x97, 0x05, 0x28, 0xee, 0xf8,
+	0x43, 0x2e, 0x68, 0x74, 0xdd, 0x41, 0xd2, 0x6a, 0xe7, 0x82, 0xb4, 0x03, 0xc5, 0x88, 0x31, 0x61,
+	0x3b, 0xe4, 0xbc, 0xf8, 0x60, 0xc6, 0xc4, 0x4e, 0xbb, 0x53, 0x95, 0x44, 0xd9, 0xdb, 0xe3, 0x3d,
+	0x2e, 0x48, 0xea, 0x0e, 0x41, 0x6f, 0x61, 0x35, 0x79, 0x11, 0x0f, 0x18, 0x13, 0x5c, 0x44, 0x24,
+	0xb4, 0xfb, 0x74, 0x2c, 0x67, 0xa5, 0xfc, 0x59, 0x83, 0xf6, 0x5e, 0xe0, 0x44, 0x63, 0x15, 0xbc,
+	0xe7, 0x74, 0x8c, 0x57, 0xb4, 0x80, 0x4e, 0xc2, 0x7f, 0x4e, 0xc7, 0x1c, 0x3d, 0x86, 0x35, 0x3a,
+	0x85, 0x49, 0x89, 0xb6, 0x4f, 0x06, 0xf2, 0xad, 0xb7, 0x1d, 0x9f, 0x39, 0x7d, 0xf5, 0xdc, 0x98,
+	0xf8, 0x16, 0x4d, 0x8b, 0xfa, 0x75, 0x8c, 0xd8, 0x91, 0x00, 0xc4, 0xc1, 0x3a, 0xf0, 0x89, 0xd3,
+	0xf7, 0x3d, 0x2e, 0xff, 0x97, 0x4a, 0xcd, 0xcd, 0xf2, 0xc5, 0x90, 0xb6, 0x6d, 0x9d, 0x13, 0xad,
+	0x56, 0x67, 0xc6, 0x4d, 0x4d, 0xe1, 0xba, 0xa2, 0xbe, 0x7b, 0x90, 0x7d, 0x8a, 0x3a, 0x50, 0x19,
+	0x06, 0x52, 0x7d, 0x1c, 0x83, 0xf2, 0x65, 0x63, 0x00, 0x31, 0x4b, 0x7a, 0x5e, 0x1b, 0xc1, 0xda,
+	0x79, 0xca, 0x33, 0x6a, 0xf3, 0x49, 0xba, 0x36, 0x2b, 0x9b, 0x77, 0xb3, 0xf4, 0x65, 0x8b, 0x4c,
+	0xd5, 0x71, 0x66, 0xda, 0xfe, 0xcd, 0x80, 0xc2, 0x2b, 0xea, 0x44, 0x54, 0x7c, 0xd5, 0xac, 0xdd,
+	0x3a, 0x91, 0xb5, 0xf5, 0xec, 0x41, 0x58, 0x6a, 0x9d, 0x4b, 0xda, 0x1a, 0x94, 0xbc, 0x40, 0xd0,
+	0x28, 0x20, 0xbe, 0xca, 0xda, 0x12, 0x9e, 0xee, 0x33, 0x1d, 0xf8, 0x64, 0x40, 0x21, 0x9e, 0x14,
+	0xaf, 0xdb, 0x81, 0x58, 0xeb, 0x69, 0x07, 0x32, 0x8d, 0xfc, 0x8f, 0x01, 0xa5, 0xe4, 0xc1, 0xfa,
+	0xaa, 0x66, 0x9e, 0x9a, 0xbc, 0xf2, 0xff, 0xf7, 0xe4, 0x85, 0xc0, 0xec, 0x7b, 0x81, 0x9e, 0x11,
+	0xb1, 0x5a, 0xa3, 0x16, 0x14, 0x43, 0x32, 0xf6, 0x19, 0x71, 0x75, 0xa3, 0x5c, 0x99, 0xfb, 0x95,
+	0xa2, 0x1d, 0x8c, 0x71, 0x02, 0xda, 0x5e, 0x39, 0x3c, 0x6a, 0x2e, 0x43, 0x35, 0xed, 0xf9, 0x7b,
+	0xa3, 0xf9, 0x4f, 0x03, 0xca, 0x7b, 0x7f, 0x14, 0x34, 0x50, 0xf3, 0xc0, 0x37, 0xd2, 0xf9, 0xf5,
+	0xf9, 0x5f, 0x32, 0xca, 0x27, 0x7e, 0xa4, 0xc8, 0xba, 0xd4, 0x8e, 0xf5, 0xf9, 0x4b, 0xfd, 0xc6,
+	0xbf, 0xbe, 0xd4, 0x6f, 0x7c, 0x9c, 0xd4, 0x8d, 0xcf, 0x93, 0xba, 0xf1, 0x8f, 0x49, 0xdd, 0xf8,
+	0xf7, 0xa4, 0x6e, 0x1c, 0x14, 0x54, 0x7c, 0x7e, 0xf2, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf9,
+	0xa1, 0x26, 0x54, 0x90, 0x13, 0x00, 0x00,
 }
diff --git a/vendor/github.com/docker/swarmkit/api/objects.proto b/vendor/github.com/docker/swarmkit/api/objects.proto
index fd5aff7..c3ee419 100644
--- a/vendor/github.com/docker/swarmkit/api/objects.proto
+++ b/vendor/github.com/docker/swarmkit/api/objects.proto
@@ -59,8 +59,9 @@
 	// component, if the node is a manager.
 	ManagerStatus manager_status = 6;
 
+	// DEPRECATED: Use lb_attachments to find the ingress network
 	// The node attachment to the ingress network.
-	NetworkAttachment attachment = 7;
+	NetworkAttachment attachment = 7 [deprecated=true];
 
 	// Certificate is the TLS certificate issued for the node, if any.
 	Certificate certificate = 8 [(gogoproto.nullable) = false];
@@ -75,6 +76,11 @@
 	// shows the privilege level that the CA would currently grant when
 	// issuing or renewing the node's certificate.
 	NodeRole role = 9;
+
+	// Attachments enumerates the network attachments for the node to set up an
+	// endpoint on the node to be used for load balancing. Each overlay
+	// network, including ingress network, will have an NetworkAttachment.
+	repeated NetworkAttachment attachments = 10;
 }
 
 message Service {
@@ -257,7 +263,7 @@
 
 	// List of aliases by which a task is resolved in a network
 	repeated string aliases = 3;
-	
+
 	// Map of all the driver attachment options for this network
 	map<string,string> driver_attachment_opts = 4;
 }
diff --git a/vendor/github.com/docker/swarmkit/api/specs.pb.go b/vendor/github.com/docker/swarmkit/api/specs.pb.go
index 3d000fb..dfd18a6 100644
--- a/vendor/github.com/docker/swarmkit/api/specs.pb.go
+++ b/vendor/github.com/docker/swarmkit/api/specs.pb.go
@@ -10,6 +10,7 @@
 import _ "github.com/gogo/protobuf/gogoproto"
 import google_protobuf1 "github.com/gogo/protobuf/types"
 import google_protobuf3 "github.com/gogo/protobuf/types"
+import google_protobuf4 "github.com/gogo/protobuf/types"
 
 import github_com_docker_swarmkit_api_deepcopy "github.com/docker/swarmkit/api/deepcopy"
 
@@ -74,6 +75,35 @@
 }
 func (NodeSpec_Availability) EnumDescriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{0, 1} }
 
+type ContainerSpec_Isolation int32
+
+const (
+	// ISOLATION_DEFAULT uses whatever default value from the container runtime
+	ContainerIsolationDefault ContainerSpec_Isolation = 0
+	// ISOLATION_PROCESS forces windows container isolation
+	ContainerIsolationProcess ContainerSpec_Isolation = 1
+	// ISOLATION_HYPERV forces Hyper-V isolation
+	ContainerIsolationHyperV ContainerSpec_Isolation = 2
+)
+
+var ContainerSpec_Isolation_name = map[int32]string{
+	0: "ISOLATION_DEFAULT",
+	1: "ISOLATION_PROCESS",
+	2: "ISOLATION_HYPERV",
+}
+var ContainerSpec_Isolation_value = map[string]int32{
+	"ISOLATION_DEFAULT": 0,
+	"ISOLATION_PROCESS": 1,
+	"ISOLATION_HYPERV":  2,
+}
+
+func (x ContainerSpec_Isolation) String() string {
+	return proto.EnumName(ContainerSpec_Isolation_name, int32(x))
+}
+func (ContainerSpec_Isolation) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptorSpecs, []int{8, 0}
+}
+
 // ResolutionMode specifies the mode of resolution to use for
 // internal loadbalancing between tasks which are all within
 // the cluster. This is sometimes calls east-west data path.
@@ -542,6 +572,8 @@
 	Groups []string `protobuf:"bytes,11,rep,name=groups" json:"groups,omitempty"`
 	// Privileges specifies security configuration/permissions.
 	Privileges *Privileges `protobuf:"bytes,22,opt,name=privileges" json:"privileges,omitempty"`
+	// Init declares that a custom init will be running inside the container, if null, use the daemon's configured settings
+	Init *google_protobuf4.BoolValue `protobuf:"bytes,23,opt,name=init" json:"init,omitempty"`
 	// TTY declares that a TTY should be attached to the standard streams,
 	// including stdin if it is still open.
 	TTY bool `protobuf:"varint,13,opt,name=tty,proto3" json:"tty,omitempty"`
@@ -585,97 +617,18 @@
 	// task will exit and a new task will be rescheduled elsewhere. A container
 	// is considered unhealthy after `Retries` number of consecutive failures.
 	Healthcheck *HealthConfig `protobuf:"bytes,16,opt,name=healthcheck" json:"healthcheck,omitempty"`
-	// Run a custom init inside the container, if null, use the daemon's configured settings
-	//
-	// Types that are valid to be assigned to Init:
-	//	*ContainerSpec_InitValue
-	Init isContainerSpec_Init `protobuf_oneof:"init"`
+	// Isolation defines the isolation level for windows containers (default, process, hyperv).
+	// Runtimes that don't support it ignore that field
+	Isolation ContainerSpec_Isolation `protobuf:"varint,24,opt,name=isolation,proto3,enum=docker.swarmkit.v1.ContainerSpec_Isolation" json:"isolation,omitempty"`
+	// PidsLimit prevents from OS resource damage by applications inside the container
+	// using fork bomb attack.
+	PidsLimit int64 `protobuf:"varint,25,opt,name=pidsLimit,proto3" json:"pidsLimit,omitempty"`
 }
 
 func (m *ContainerSpec) Reset()                    { *m = ContainerSpec{} }
 func (*ContainerSpec) ProtoMessage()               {}
 func (*ContainerSpec) Descriptor() ([]byte, []int) { return fileDescriptorSpecs, []int{8} }
 
-type isContainerSpec_Init interface {
-	isContainerSpec_Init()
-	MarshalTo([]byte) (int, error)
-	Size() int
-}
-
-type ContainerSpec_InitValue struct {
-	InitValue bool `protobuf:"varint,23,opt,name=init_value,json=initValue,proto3,oneof"`
-}
-
-func (*ContainerSpec_InitValue) isContainerSpec_Init() {}
-
-func (m *ContainerSpec) GetInit() isContainerSpec_Init {
-	if m != nil {
-		return m.Init
-	}
-	return nil
-}
-
-func (m *ContainerSpec) GetInitValue() bool {
-	if x, ok := m.GetInit().(*ContainerSpec_InitValue); ok {
-		return x.InitValue
-	}
-	return false
-}
-
-// XXX_OneofFuncs is for the internal use of the proto package.
-func (*ContainerSpec) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
-	return _ContainerSpec_OneofMarshaler, _ContainerSpec_OneofUnmarshaler, _ContainerSpec_OneofSizer, []interface{}{
-		(*ContainerSpec_InitValue)(nil),
-	}
-}
-
-func _ContainerSpec_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
-	m := msg.(*ContainerSpec)
-	// init
-	switch x := m.Init.(type) {
-	case *ContainerSpec_InitValue:
-		t := uint64(0)
-		if x.InitValue {
-			t = 1
-		}
-		_ = b.EncodeVarint(23<<3 | proto.WireVarint)
-		_ = b.EncodeVarint(t)
-	case nil:
-	default:
-		return fmt.Errorf("ContainerSpec.Init has unexpected type %T", x)
-	}
-	return nil
-}
-
-func _ContainerSpec_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
-	m := msg.(*ContainerSpec)
-	switch tag {
-	case 23: // init.init_value
-		if wire != proto.WireVarint {
-			return true, proto.ErrInternalBadWireType
-		}
-		x, err := b.DecodeVarint()
-		m.Init = &ContainerSpec_InitValue{x != 0}
-		return true, err
-	default:
-		return false, nil
-	}
-}
-
-func _ContainerSpec_OneofSizer(msg proto.Message) (n int) {
-	m := msg.(*ContainerSpec)
-	// init
-	switch x := m.Init.(type) {
-	case *ContainerSpec_InitValue:
-		n += proto.SizeVarint(23<<3 | proto.WireVarint)
-		n += 1
-	case nil:
-	default:
-		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
-	}
-	return n
-}
-
 // PullOptions allows one to parameterize an image pull.
 type ContainerSpec_PullOptions struct {
 	// RegistryAuth is the registry auth token obtained from the client, required
@@ -915,6 +868,7 @@
 	proto.RegisterType((*ConfigSpec)(nil), "docker.swarmkit.v1.ConfigSpec")
 	proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Membership", NodeSpec_Membership_name, NodeSpec_Membership_value)
 	proto.RegisterEnum("docker.swarmkit.v1.NodeSpec_Availability", NodeSpec_Availability_name, NodeSpec_Availability_value)
+	proto.RegisterEnum("docker.swarmkit.v1.ContainerSpec_Isolation", ContainerSpec_Isolation_name, ContainerSpec_Isolation_value)
 	proto.RegisterEnum("docker.swarmkit.v1.EndpointSpec_ResolutionMode", EndpointSpec_ResolutionMode_name, EndpointSpec_ResolutionMode_value)
 }
 
@@ -1175,6 +1129,10 @@
 		m.Privileges = &Privileges{}
 		github_com_docker_swarmkit_api_deepcopy.Copy(m.Privileges, o.Privileges)
 	}
+	if o.Init != nil {
+		m.Init = &google_protobuf4.BoolValue{}
+		github_com_docker_swarmkit_api_deepcopy.Copy(m.Init, o.Init)
+	}
 	if o.Mounts != nil {
 		m.Mounts = make([]Mount, len(o.Mounts))
 		for i := range m.Mounts {
@@ -1219,16 +1177,6 @@
 		m.Healthcheck = &HealthConfig{}
 		github_com_docker_swarmkit_api_deepcopy.Copy(m.Healthcheck, o.Healthcheck)
 	}
-	if o.Init != nil {
-		switch o.Init.(type) {
-		case *ContainerSpec_InitValue:
-			v := ContainerSpec_InitValue{
-				InitValue: o.GetInitValue(),
-			}
-			m.Init = &v
-		}
-	}
-
 }
 
 func (m *ContainerSpec_PullOptions) Copy() *ContainerSpec_PullOptions {
@@ -2092,29 +2040,34 @@
 		i += n23
 	}
 	if m.Init != nil {
-		nn24, err := m.Init.MarshalTo(dAtA[i:])
+		dAtA[i] = 0xba
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Init.Size()))
+		n24, err := m.Init.MarshalTo(dAtA[i:])
 		if err != nil {
 			return 0, err
 		}
-		i += nn24
+		i += n24
+	}
+	if m.Isolation != 0 {
+		dAtA[i] = 0xc0
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.Isolation))
+	}
+	if m.PidsLimit != 0 {
+		dAtA[i] = 0xc8
+		i++
+		dAtA[i] = 0x1
+		i++
+		i = encodeVarintSpecs(dAtA, i, uint64(m.PidsLimit))
 	}
 	return i, nil
 }
 
-func (m *ContainerSpec_InitValue) MarshalTo(dAtA []byte) (int, error) {
-	i := 0
-	dAtA[i] = 0xb8
-	i++
-	dAtA[i] = 0x1
-	i++
-	if m.InitValue {
-		dAtA[i] = 1
-	} else {
-		dAtA[i] = 0
-	}
-	i++
-	return i, nil
-}
 func (m *ContainerSpec_PullOptions) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -2838,17 +2791,18 @@
 		n += 2 + l + sovSpecs(uint64(l))
 	}
 	if m.Init != nil {
-		n += m.Init.Size()
+		l = m.Init.Size()
+		n += 2 + l + sovSpecs(uint64(l))
+	}
+	if m.Isolation != 0 {
+		n += 2 + sovSpecs(uint64(m.Isolation))
+	}
+	if m.PidsLimit != 0 {
+		n += 2 + sovSpecs(uint64(m.PidsLimit))
 	}
 	return n
 }
 
-func (m *ContainerSpec_InitValue) Size() (n int) {
-	var l int
-	_ = l
-	n += 3
-	return n
-}
 func (m *ContainerSpec_PullOptions) Size() (n int) {
 	var l int
 	_ = l
@@ -3191,17 +3145,9 @@
 		`StopSignal:` + fmt.Sprintf("%v", this.StopSignal) + `,`,
 		`Configs:` + strings.Replace(fmt.Sprintf("%v", this.Configs), "ConfigReference", "ConfigReference", 1) + `,`,
 		`Privileges:` + strings.Replace(fmt.Sprintf("%v", this.Privileges), "Privileges", "Privileges", 1) + `,`,
-		`Init:` + fmt.Sprintf("%v", this.Init) + `,`,
-		`}`,
-	}, "")
-	return s
-}
-func (this *ContainerSpec_InitValue) String() string {
-	if this == nil {
-		return "nil"
-	}
-	s := strings.Join([]string{`&ContainerSpec_InitValue{`,
-		`InitValue:` + fmt.Sprintf("%v", this.InitValue) + `,`,
+		`Init:` + strings.Replace(fmt.Sprintf("%v", this.Init), "BoolValue", "google_protobuf4.BoolValue", 1) + `,`,
+		`Isolation:` + fmt.Sprintf("%v", this.Isolation) + `,`,
+		`PidsLimit:` + fmt.Sprintf("%v", this.PidsLimit) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -5278,10 +5224,10 @@
 			}
 			iNdEx = postIndex
 		case 23:
-			if wireType != 0 {
-				return fmt.Errorf("proto: wrong wireType = %d for field InitValue", wireType)
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Init", wireType)
 			}
-			var v int
+			var msglen int
 			for shift := uint(0); ; shift += 7 {
 				if shift >= 64 {
 					return ErrIntOverflowSpecs
@@ -5291,13 +5237,63 @@
 				}
 				b := dAtA[iNdEx]
 				iNdEx++
-				v |= (int(b) & 0x7F) << shift
+				msglen |= (int(b) & 0x7F) << shift
 				if b < 0x80 {
 					break
 				}
 			}
-			b := bool(v != 0)
-			m.Init = &ContainerSpec_InitValue{b}
+			if msglen < 0 {
+				return ErrInvalidLengthSpecs
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Init == nil {
+				m.Init = &google_protobuf4.BoolValue{}
+			}
+			if err := m.Init.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 24:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Isolation", wireType)
+			}
+			m.Isolation = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Isolation |= (ContainerSpec_Isolation(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 25:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PidsLimit", wireType)
+			}
+			m.PidsLimit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowSpecs
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PidsLimit |= (int64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
 		default:
 			iNdEx = preIndex
 			skippy, err := skipSpecs(dAtA[iNdEx:])
@@ -6609,130 +6605,139 @@
 func init() { proto.RegisterFile("github.com/docker/swarmkit/api/specs.proto", fileDescriptorSpecs) }
 
 var fileDescriptorSpecs = []byte{
-	// 2000 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x4f, 0x6f, 0x1b, 0xb9,
-	0x15, 0xb7, 0x6c, 0x59, 0x7f, 0xde, 0xc8, 0x89, 0xc2, 0x4d, 0xb2, 0x13, 0xa5, 0x6b, 0x29, 0xda,
-	0x6c, 0xea, 0xdd, 0x45, 0x65, 0xd4, 0x2d, 0xb6, 0xd9, 0x4d, 0xb7, 0xad, 0x64, 0xa9, 0x8e, 0x9b,
-	0xc6, 0x11, 0x68, 0x6f, 0xda, 0x00, 0x05, 0x04, 0x7a, 0x86, 0x96, 0x08, 0x8f, 0x86, 0x53, 0x0e,
-	0xc7, 0x0b, 0xdd, 0x7a, 0x5c, 0xa4, 0x9f, 0x21, 0xe8, 0xa1, 0xe8, 0xbd, 0x1f, 0xa0, 0x1f, 0x20,
-	0xc7, 0x1e, 0xdb, 0x8b, 0xd1, 0xd5, 0x57, 0xe8, 0xad, 0x97, 0x16, 0xe4, 0x70, 0x46, 0xa3, 0x64,
-	0x1c, 0x07, 0x68, 0x0e, 0x7b, 0x23, 0xdf, 0xfc, 0x7e, 0x6f, 0x1e, 0xc9, 0xdf, 0xe3, 0x7b, 0x84,
-	0x4f, 0xc6, 0x4c, 0x4e, 0xa2, 0xe3, 0x8e, 0xc3, 0xa7, 0xdb, 0x2e, 0x77, 0x4e, 0xa9, 0xd8, 0x0e,
-	0xbf, 0x26, 0x62, 0x7a, 0xca, 0xe4, 0x36, 0x09, 0xd8, 0x76, 0x18, 0x50, 0x27, 0xec, 0x04, 0x82,
-	0x4b, 0x8e, 0x50, 0x0c, 0xe8, 0x24, 0x80, 0xce, 0xd9, 0x0f, 0x1b, 0x97, 0xf1, 0xe5, 0x2c, 0xa0,
-	0x86, 0xdf, 0xb8, 0x3e, 0xe6, 0x63, 0xae, 0x87, 0xdb, 0x6a, 0x64, 0xac, 0x9b, 0x63, 0xce, 0xc7,
-	0x1e, 0xdd, 0xd6, 0xb3, 0xe3, 0xe8, 0x64, 0xdb, 0x8d, 0x04, 0x91, 0x8c, 0xfb, 0xe6, 0xfb, 0xad,
-	0x57, 0xbf, 0x13, 0x7f, 0x16, 0x7f, 0x6a, 0xbf, 0x28, 0x42, 0xe5, 0x80, 0xbb, 0xf4, 0x30, 0xa0,
-	0x0e, 0xda, 0x03, 0x8b, 0xf8, 0x3e, 0x97, 0x9a, 0x1b, 0xda, 0x85, 0x56, 0x61, 0xcb, 0xda, 0x69,
-	0x76, 0x5e, 0x8f, 0xb9, 0xd3, 0x5d, 0xc0, 0x7a, 0xc5, 0x97, 0xe7, 0xcd, 0x15, 0x9c, 0x65, 0xa2,
-	0x9f, 0x43, 0xcd, 0xa5, 0x21, 0x13, 0xd4, 0x1d, 0x09, 0xee, 0x51, 0x7b, 0xb5, 0x55, 0xd8, 0xba,
-	0xb2, 0xf3, 0xbd, 0x3c, 0x4f, 0xea, 0xe7, 0x98, 0x7b, 0x14, 0x5b, 0x86, 0xa1, 0x26, 0x68, 0x0f,
-	0x60, 0x4a, 0xa7, 0xc7, 0x54, 0x84, 0x13, 0x16, 0xd8, 0x6b, 0x9a, 0xfe, 0xfd, 0x8b, 0xe8, 0x2a,
-	0xf6, 0xce, 0xe3, 0x14, 0x8e, 0x33, 0x54, 0xf4, 0x18, 0x6a, 0xe4, 0x8c, 0x30, 0x8f, 0x1c, 0x33,
-	0x8f, 0xc9, 0x99, 0x5d, 0xd4, 0xae, 0x3e, 0x7e, 0xa3, 0xab, 0x6e, 0x86, 0x80, 0x97, 0xe8, 0x6d,
-	0x17, 0x60, 0xf1, 0x23, 0x74, 0x0f, 0xca, 0xc3, 0xc1, 0x41, 0x7f, 0xff, 0x60, 0xaf, 0xbe, 0xd2,
-	0xb8, 0xf5, 0xfc, 0x45, 0xeb, 0x86, 0xf2, 0xb1, 0x00, 0x0c, 0xa9, 0xef, 0x32, 0x7f, 0x8c, 0xb6,
-	0xa0, 0xd2, 0xdd, 0xdd, 0x1d, 0x0c, 0x8f, 0x06, 0xfd, 0x7a, 0xa1, 0xd1, 0x78, 0xfe, 0xa2, 0x75,
-	0x73, 0x19, 0xd8, 0x75, 0x1c, 0x1a, 0x48, 0xea, 0x36, 0x8a, 0xdf, 0xfc, 0x79, 0x73, 0xa5, 0xfd,
-	0x4d, 0x01, 0x6a, 0xd9, 0x20, 0xd0, 0x3d, 0x28, 0x75, 0x77, 0x8f, 0xf6, 0x9f, 0x0e, 0xea, 0x2b,
-	0x0b, 0x7a, 0x16, 0xd1, 0x75, 0x24, 0x3b, 0xa3, 0xe8, 0x2e, 0xac, 0x0f, 0xbb, 0x5f, 0x1d, 0x0e,
-	0xea, 0x85, 0x45, 0x38, 0x59, 0xd8, 0x90, 0x44, 0xa1, 0x46, 0xf5, 0x71, 0x77, 0xff, 0xa0, 0xbe,
-	0x9a, 0x8f, 0xea, 0x0b, 0xc2, 0x7c, 0x13, 0xca, 0x9f, 0x8a, 0x60, 0x1d, 0x52, 0x71, 0xc6, 0x9c,
-	0x77, 0x2c, 0x91, 0xcf, 0xa0, 0x28, 0x49, 0x78, 0xaa, 0xa5, 0x61, 0xe5, 0x4b, 0xe3, 0x88, 0x84,
-	0xa7, 0xea, 0xa7, 0x86, 0xae, 0xf1, 0x4a, 0x19, 0x82, 0x06, 0x1e, 0x73, 0x88, 0xa4, 0xae, 0x56,
-	0x86, 0xb5, 0xf3, 0x51, 0x1e, 0x1b, 0xa7, 0x28, 0x13, 0xff, 0xc3, 0x15, 0x9c, 0xa1, 0xa2, 0x07,
-	0x50, 0x1a, 0x7b, 0xfc, 0x98, 0x78, 0x5a, 0x13, 0xd6, 0xce, 0x9d, 0x3c, 0x27, 0x7b, 0x1a, 0xb1,
-	0x70, 0x60, 0x28, 0xe8, 0x3e, 0x94, 0xa2, 0xc0, 0x25, 0x92, 0xda, 0x25, 0x4d, 0x6e, 0xe5, 0x91,
-	0xbf, 0xd2, 0x88, 0x5d, 0xee, 0x9f, 0xb0, 0x31, 0x36, 0x78, 0xf4, 0x08, 0x2a, 0x3e, 0x95, 0x5f,
-	0x73, 0x71, 0x1a, 0xda, 0xe5, 0xd6, 0xda, 0x96, 0xb5, 0xf3, 0x69, 0xae, 0x18, 0x63, 0x4c, 0x57,
-	0x4a, 0xe2, 0x4c, 0xa6, 0xd4, 0x97, 0xb1, 0x9b, 0xde, 0xaa, 0x5d, 0xc0, 0xa9, 0x03, 0xf4, 0x53,
-	0xa8, 0x50, 0xdf, 0x0d, 0x38, 0xf3, 0xa5, 0x5d, 0xb9, 0x38, 0x90, 0x81, 0xc1, 0xa8, 0xcd, 0xc4,
-	0x29, 0x43, 0xb1, 0x05, 0xf7, 0xbc, 0x63, 0xe2, 0x9c, 0xda, 0xd5, 0xb7, 0x5c, 0x46, 0xca, 0xe8,
-	0x95, 0xa0, 0x38, 0xe5, 0x2e, 0x6d, 0x6f, 0xc3, 0xb5, 0xd7, 0xb6, 0x1a, 0x35, 0xa0, 0x62, 0xb6,
-	0x3a, 0xd6, 0x48, 0x11, 0xa7, 0xf3, 0xf6, 0x55, 0xd8, 0x58, 0xda, 0xd6, 0xf6, 0x5f, 0xd7, 0xa1,
-	0x92, 0x9c, 0x35, 0xea, 0x42, 0xd5, 0xe1, 0xbe, 0x24, 0xcc, 0xa7, 0xc2, 0xc8, 0x2b, 0xf7, 0x64,
-	0x76, 0x13, 0x90, 0x62, 0x3d, 0x5c, 0xc1, 0x0b, 0x16, 0xfa, 0x25, 0x54, 0x05, 0x0d, 0x79, 0x24,
-	0x1c, 0x1a, 0x1a, 0x7d, 0x6d, 0xe5, 0x2b, 0x24, 0x06, 0x61, 0xfa, 0xfb, 0x88, 0x09, 0xaa, 0x76,
-	0x39, 0xc4, 0x0b, 0x2a, 0x7a, 0x00, 0x65, 0x41, 0x43, 0x49, 0x84, 0x7c, 0x93, 0x44, 0x70, 0x0c,
-	0x19, 0x72, 0x8f, 0x39, 0x33, 0x9c, 0x30, 0xd0, 0x03, 0xa8, 0x06, 0x1e, 0x71, 0xb4, 0x57, 0x7b,
-	0x5d, 0xd3, 0x3f, 0xc8, 0xa3, 0x0f, 0x13, 0x10, 0x5e, 0xe0, 0xd1, 0xe7, 0x00, 0x1e, 0x1f, 0x8f,
-	0x5c, 0xc1, 0xce, 0xa8, 0x30, 0x12, 0x6b, 0xe4, 0xb1, 0xfb, 0x1a, 0x81, 0xab, 0x1e, 0x1f, 0xc7,
-	0x43, 0xb4, 0xf7, 0x7f, 0xe9, 0x2b, 0xa3, 0xad, 0x47, 0x00, 0x24, 0xfd, 0x6a, 0xd4, 0xf5, 0xf1,
-	0x5b, 0xb9, 0x32, 0x27, 0x92, 0xa1, 0xa3, 0x3b, 0x50, 0x3b, 0xe1, 0xc2, 0xa1, 0x23, 0x93, 0x35,
-	0x55, 0xad, 0x09, 0x4b, 0xdb, 0x62, 0x7d, 0xa1, 0x1e, 0x94, 0xc7, 0xd4, 0xa7, 0x82, 0x39, 0x36,
-	0xe8, 0x9f, 0xdd, 0xcb, 0x4d, 0xc8, 0x18, 0x82, 0x23, 0x5f, 0xb2, 0x29, 0x35, 0x7f, 0x4a, 0x88,
-	0xe8, 0x77, 0xf0, 0x5e, 0x72, 0x7c, 0x23, 0x41, 0x4f, 0xa8, 0xa0, 0xbe, 0xd2, 0x80, 0xa5, 0xf7,
-	0xe1, 0xa3, 0x37, 0x6b, 0xc0, 0xa0, 0xcd, 0x65, 0x83, 0xc4, 0xab, 0x1f, 0xc2, 0x5e, 0x15, 0xca,
-	0x22, 0xfe, 0x6f, 0xfb, 0x8f, 0x05, 0xa5, 0xfa, 0x57, 0x10, 0x68, 0x1b, 0xac, 0xf4, 0xf7, 0xcc,
-	0xd5, 0xea, 0xad, 0xf6, 0xae, 0xcc, 0xcf, 0x9b, 0x90, 0x60, 0xf7, 0xfb, 0xea, 0x0e, 0x32, 0x63,
-	0x17, 0x0d, 0x60, 0x23, 0x25, 0xa8, 0x32, 0x6f, 0x0a, 0x65, 0xeb, 0x4d, 0x91, 0x1e, 0xcd, 0x02,
-	0x8a, 0x6b, 0x22, 0x33, 0x6b, 0xff, 0x16, 0xd0, 0xeb, 0xfb, 0x82, 0x10, 0x14, 0x4f, 0x99, 0x6f,
-	0xc2, 0xc0, 0x7a, 0x8c, 0x3a, 0x50, 0x0e, 0xc8, 0xcc, 0xe3, 0xc4, 0x35, 0x89, 0x71, 0xbd, 0x13,
-	0xf7, 0x06, 0x9d, 0xa4, 0x37, 0xe8, 0x74, 0xfd, 0x19, 0x4e, 0x40, 0xed, 0x47, 0x70, 0x23, 0xf7,
-	0x78, 0xd1, 0x0e, 0xd4, 0xd2, 0x84, 0x5b, 0xac, 0xf5, 0xea, 0xfc, 0xbc, 0x69, 0xa5, 0x99, 0xb9,
-	0xdf, 0xc7, 0x56, 0x0a, 0xda, 0x77, 0xdb, 0x7f, 0xab, 0xc2, 0xc6, 0x52, 0xda, 0xa2, 0xeb, 0xb0,
-	0xce, 0xa6, 0x64, 0x4c, 0x4d, 0x8c, 0xf1, 0x04, 0x0d, 0xa0, 0xe4, 0x91, 0x63, 0xea, 0xa9, 0xe4,
-	0x55, 0x07, 0xf7, 0x83, 0x4b, 0xf3, 0xbf, 0xf3, 0x6b, 0x8d, 0x1f, 0xf8, 0x52, 0xcc, 0xb0, 0x21,
-	0x23, 0x1b, 0xca, 0x0e, 0x9f, 0x4e, 0x89, 0xaf, 0xca, 0xc4, 0xda, 0x56, 0x15, 0x27, 0x53, 0xb5,
-	0x33, 0x44, 0x8c, 0x43, 0xbb, 0xa8, 0xcd, 0x7a, 0x8c, 0xea, 0xb0, 0x46, 0xfd, 0x33, 0x7b, 0x5d,
-	0x9b, 0xd4, 0x50, 0x59, 0x5c, 0x16, 0x67, 0x5f, 0x15, 0xab, 0xa1, 0xe2, 0x45, 0x21, 0x15, 0x76,
-	0x39, 0xde, 0x51, 0x35, 0x46, 0x3f, 0x81, 0xd2, 0x94, 0x47, 0xbe, 0x0c, 0xed, 0x8a, 0x0e, 0xf6,
-	0x56, 0x5e, 0xb0, 0x8f, 0x15, 0xc2, 0x28, 0xcb, 0xc0, 0xd1, 0x00, 0xae, 0x85, 0x92, 0x07, 0xa3,
-	0xb1, 0x20, 0x0e, 0x1d, 0x05, 0x54, 0x30, 0xee, 0x9a, 0x6b, 0xf8, 0xd6, 0x6b, 0x87, 0xd2, 0x37,
-	0x0d, 0x1d, 0xbe, 0xaa, 0x38, 0x7b, 0x8a, 0x32, 0xd4, 0x0c, 0x34, 0x84, 0x5a, 0x10, 0x79, 0xde,
-	0x88, 0x07, 0x71, 0x45, 0x8e, 0x73, 0xe7, 0x2d, 0xb6, 0x6c, 0x18, 0x79, 0xde, 0x93, 0x98, 0x84,
-	0xad, 0x60, 0x31, 0x41, 0x37, 0xa1, 0x34, 0x16, 0x3c, 0x0a, 0xe2, 0xbc, 0xa9, 0x62, 0x33, 0x43,
-	0x5f, 0x42, 0x39, 0xa4, 0x8e, 0xa0, 0x32, 0xb4, 0x6b, 0x7a, 0xa9, 0x1f, 0xe6, 0xfd, 0xe4, 0x50,
-	0x43, 0xd2, 0x9c, 0xc0, 0x09, 0x07, 0xdd, 0x82, 0x35, 0x29, 0x67, 0xf6, 0x46, 0xab, 0xb0, 0x55,
-	0xe9, 0x95, 0xe7, 0xe7, 0xcd, 0xb5, 0xa3, 0xa3, 0x67, 0x58, 0xd9, 0x54, 0xb5, 0x98, 0xf0, 0x50,
-	0xfa, 0x64, 0x4a, 0xed, 0x2b, 0x7a, 0x6f, 0xd3, 0x39, 0x7a, 0x06, 0xe0, 0xfa, 0xe1, 0xc8, 0xd1,
-	0xd7, 0x93, 0x7d, 0x55, 0xaf, 0xee, 0xd3, 0xcb, 0x57, 0xd7, 0x3f, 0x38, 0x34, 0x15, 0x73, 0x63,
-	0x7e, 0xde, 0xac, 0xa6, 0x53, 0x5c, 0x75, 0xfd, 0x30, 0x1e, 0xa2, 0x1e, 0x58, 0x13, 0x4a, 0x3c,
-	0x39, 0x71, 0x26, 0xd4, 0x39, 0xb5, 0xeb, 0x17, 0x97, 0xc0, 0x87, 0x1a, 0x66, 0x3c, 0x64, 0x49,
-	0x4a, 0xc1, 0x2a, 0xd4, 0xd0, 0xbe, 0xa6, 0xf7, 0x2a, 0x9e, 0xa0, 0x0f, 0x00, 0x78, 0x40, 0xfd,
-	0x51, 0x28, 0x5d, 0xe6, 0xdb, 0x48, 0x2d, 0x19, 0x57, 0x95, 0xe5, 0x50, 0x19, 0xd0, 0x6d, 0x55,
-	0xa0, 0x88, 0x3b, 0xe2, 0xbe, 0x37, 0xb3, 0xdf, 0xd3, 0x5f, 0x2b, 0xca, 0xf0, 0xc4, 0xf7, 0x66,
-	0xa8, 0x09, 0x96, 0xd6, 0x45, 0xc8, 0xc6, 0x3e, 0xf1, 0xec, 0xeb, 0x7a, 0x3f, 0x40, 0x99, 0x0e,
-	0xb5, 0x45, 0x9d, 0x43, 0xbc, 0x1b, 0xa1, 0x7d, 0xe3, 0xe2, 0x73, 0x30, 0xc1, 0x2e, 0xce, 0xc1,
-	0x70, 0xd0, 0xcf, 0x00, 0x02, 0xc1, 0xce, 0x98, 0x47, 0xc7, 0x34, 0xb4, 0x6f, 0xea, 0x45, 0x6f,
-	0xe6, 0x56, 0xa6, 0x14, 0x85, 0x33, 0x0c, 0xd4, 0x04, 0x60, 0x3e, 0x93, 0xa3, 0x33, 0xe2, 0x45,
-	0xd4, 0x7e, 0x5f, 0x45, 0xaf, 0xca, 0xaf, 0xb2, 0x3d, 0x55, 0xa6, 0xc6, 0xe7, 0x60, 0x65, 0xd2,
-	0x51, 0xa5, 0xd1, 0x29, 0x9d, 0x99, 0x0c, 0x57, 0x43, 0xb5, 0x67, 0x31, 0x79, 0x35, 0xce, 0x7a,
-	0x3d, 0xf9, 0x62, 0xf5, 0x7e, 0xa1, 0xb1, 0x03, 0x56, 0x46, 0x96, 0xe8, 0x43, 0x75, 0x3d, 0x8e,
-	0x59, 0x28, 0xc5, 0x6c, 0x44, 0x22, 0x39, 0xb1, 0x7f, 0xa1, 0x09, 0xb5, 0xc4, 0xd8, 0x8d, 0xe4,
-	0xa4, 0x31, 0x82, 0xc5, 0xe9, 0xa2, 0x16, 0x58, 0x4a, 0x35, 0x21, 0x15, 0x67, 0x54, 0xa8, 0xd6,
-	0x43, 0x1d, 0x4a, 0xd6, 0xa4, 0xd4, 0x1d, 0x52, 0x22, 0x9c, 0x89, 0xbe, 0x5c, 0xaa, 0xd8, 0xcc,
-	0xd4, 0x6d, 0x91, 0xa4, 0x90, 0xb9, 0x2d, 0xcc, 0x54, 0x35, 0x3a, 0x6a, 0x71, 0xed, 0x7f, 0x17,
-	0xa0, 0x96, 0xed, 0xa4, 0xd0, 0x6e, 0xdc, 0x01, 0xe9, 0xa5, 0x5d, 0xd9, 0xd9, 0xbe, 0xac, 0xf3,
-	0xd2, 0x37, 0xb8, 0x17, 0x29, 0xa7, 0x8f, 0xd5, 0xa3, 0x47, 0x93, 0xd1, 0x8f, 0x61, 0x3d, 0xe0,
-	0x42, 0x26, 0x77, 0x5d, 0xfe, 0x49, 0x70, 0x91, 0xd4, 0xe7, 0x18, 0xdc, 0x9e, 0xc0, 0x95, 0x65,
-	0x6f, 0xe8, 0x2e, 0xac, 0x3d, 0xdd, 0x1f, 0xd6, 0x57, 0x1a, 0xb7, 0x9f, 0xbf, 0x68, 0xbd, 0xbf,
-	0xfc, 0xf1, 0x29, 0x13, 0x32, 0x22, 0xde, 0xfe, 0x10, 0x7d, 0x02, 0xeb, 0xfd, 0x83, 0x43, 0x8c,
-	0xeb, 0x85, 0x46, 0xf3, 0xf9, 0x8b, 0xd6, 0xed, 0x65, 0x9c, 0xfa, 0xc4, 0x23, 0xdf, 0xc5, 0xfc,
-	0x38, 0x7d, 0x00, 0xfc, 0x67, 0x15, 0x2c, 0x53, 0x02, 0xde, 0xf5, 0x1b, 0x71, 0x23, 0xee, 0x6f,
-	0x92, 0xdc, 0x5e, 0xbd, 0xb4, 0xcd, 0xa9, 0xc5, 0x04, 0x73, 0xd6, 0x77, 0xa0, 0xc6, 0x82, 0xb3,
-	0xcf, 0x46, 0xd4, 0x27, 0xc7, 0x9e, 0x79, 0x0b, 0x54, 0xb0, 0xa5, 0x6c, 0x83, 0xd8, 0xa4, 0x2e,
-	0x16, 0xe6, 0x4b, 0x2a, 0x7c, 0xd3, 0xe5, 0x57, 0x70, 0x3a, 0x47, 0x5f, 0x42, 0x91, 0x05, 0x64,
-	0x6a, 0x7a, 0xb3, 0xdc, 0x15, 0xec, 0x0f, 0xbb, 0x8f, 0x8d, 0x16, 0x7b, 0x95, 0xf9, 0x79, 0xb3,
-	0xa8, 0x0c, 0x58, 0xd3, 0xd0, 0x66, 0xd2, 0x1e, 0xa9, 0x3f, 0xe9, 0x22, 0x51, 0xc1, 0x19, 0x8b,
-	0xd2, 0x13, 0xf3, 0xc7, 0x82, 0x86, 0xa1, 0x2e, 0x17, 0x15, 0x9c, 0x4c, 0x51, 0x03, 0xca, 0xa6,
-	0xc9, 0xd2, 0x5d, 0x55, 0x55, 0x35, 0x30, 0xc6, 0xd0, 0xdb, 0x00, 0x2b, 0xde, 0x8d, 0xd1, 0x89,
-	0xe0, 0xd3, 0xf6, 0x7f, 0x8b, 0x60, 0xed, 0x7a, 0x51, 0x28, 0x4d, 0xbd, 0x7c, 0x67, 0x9b, 0xff,
-	0x0c, 0xae, 0x11, 0xfd, 0xe6, 0x24, 0xbe, 0x2a, 0x3e, 0xba, 0x77, 0x35, 0x07, 0x70, 0x37, 0xd7,
-	0x5d, 0x0a, 0x8e, 0xfb, 0xdc, 0x5e, 0x49, 0xf9, 0xb4, 0x0b, 0xb8, 0x4e, 0x5e, 0xf9, 0x82, 0x0e,
-	0x61, 0x83, 0x0b, 0x67, 0x42, 0x43, 0x19, 0x97, 0x2c, 0xf3, 0x46, 0xcb, 0x7d, 0xbd, 0x3f, 0xc9,
-	0x02, 0xcd, 0x7d, 0x1d, 0x47, 0xbb, 0xec, 0x03, 0xdd, 0x87, 0xa2, 0x20, 0x27, 0x49, 0x1f, 0x9e,
-	0x9b, 0x24, 0x98, 0x9c, 0xc8, 0x25, 0x17, 0x9a, 0x81, 0x7e, 0x05, 0xe0, 0xb2, 0x30, 0x20, 0xd2,
-	0x99, 0x50, 0x61, 0x0e, 0x3b, 0x77, 0x89, 0xfd, 0x14, 0xb5, 0xe4, 0x25, 0xc3, 0x46, 0x8f, 0xa0,
-	0xea, 0x90, 0x44, 0xae, 0xa5, 0x8b, 0x1f, 0xae, 0xbb, 0x5d, 0xe3, 0xa2, 0xae, 0x5c, 0xcc, 0xcf,
-	0x9b, 0x95, 0xc4, 0x82, 0x2b, 0x0e, 0x31, 0xf2, 0x7d, 0x04, 0x1b, 0xea, 0x41, 0x3b, 0x72, 0xe9,
-	0x09, 0x89, 0x3c, 0x19, 0xcb, 0xe4, 0x82, 0xfa, 0xa3, 0x5e, 0x47, 0x7d, 0x83, 0x33, 0x71, 0xd5,
-	0x64, 0xc6, 0x86, 0x7e, 0x03, 0xd7, 0xa8, 0xef, 0x88, 0x99, 0x16, 0x6b, 0x12, 0x61, 0xe5, 0xe2,
-	0xc5, 0x0e, 0x52, 0xf0, 0xd2, 0x62, 0xeb, 0xf4, 0x15, 0x7b, 0xfb, 0x9f, 0x05, 0x80, 0xb8, 0xa4,
-	0xbf, 0x5b, 0x01, 0x22, 0x28, 0xba, 0x44, 0x12, 0xad, 0xb9, 0x1a, 0xd6, 0x63, 0xf4, 0x05, 0x80,
-	0xa4, 0xd3, 0xc0, 0x23, 0x92, 0xf9, 0x63, 0x23, 0x9b, 0x37, 0x5d, 0x07, 0x19, 0x34, 0xda, 0x81,
-	0x92, 0x79, 0x2d, 0x15, 0x2f, 0xe5, 0x19, 0x64, 0xfb, 0x2f, 0x05, 0x80, 0x78, 0x99, 0xdf, 0xe9,
-	0xb5, 0xf5, 0xec, 0x97, 0xdf, 0x6e, 0xae, 0xfc, 0xe3, 0xdb, 0xcd, 0x95, 0x3f, 0xcc, 0x37, 0x0b,
-	0x2f, 0xe7, 0x9b, 0x85, 0xbf, 0xcf, 0x37, 0x0b, 0xff, 0x9a, 0x6f, 0x16, 0x8e, 0x4b, 0xba, 0x41,
-	0xfc, 0xd1, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x4f, 0xdf, 0x5d, 0x60, 0x83, 0x14, 0x00, 0x00,
+	// 2131 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7,
+	0x15, 0x17, 0x25, 0x8a, 0x22, 0xdf, 0x52, 0x36, 0x35, 0x71, 0x9c, 0x15, 0x6d, 0x4b, 0x34, 0xe3,
+	0xb8, 0x4a, 0x82, 0x52, 0xa8, 0x1a, 0xa4, 0x4e, 0xdc, 0xb4, 0x25, 0x45, 0x46, 0x66, 0x6d, 0x4b,
+	0xc4, 0x50, 0x56, 0x6b, 0xa0, 0x00, 0x31, 0xda, 0x1d, 0x91, 0x03, 0x2d, 0x77, 0xb6, 0xb3, 0x43,
+	0x19, 0xbc, 0xf5, 0x18, 0xa8, 0x9f, 0x41, 0xe8, 0xa1, 0xe8, 0xbd, 0xfd, 0x16, 0x3e, 0xf6, 0xd8,
+	0x5e, 0x84, 0x44, 0x5f, 0xa1, 0xb7, 0x5e, 0x5a, 0xcc, 0xec, 0xec, 0x92, 0x94, 0x57, 0x96, 0x81,
+	0xfa, 0xd0, 0xdb, 0xcc, 0xdb, 0xdf, 0xef, 0xcd, 0xbf, 0xdf, 0xbc, 0xf7, 0x66, 0xe1, 0xb3, 0x3e,
+	0x93, 0x83, 0xd1, 0x61, 0xcd, 0xe1, 0xc3, 0x4d, 0x97, 0x3b, 0xc7, 0x54, 0x6c, 0x86, 0xaf, 0x88,
+	0x18, 0x1e, 0x33, 0xb9, 0x49, 0x02, 0xb6, 0x19, 0x06, 0xd4, 0x09, 0x6b, 0x81, 0xe0, 0x92, 0x23,
+	0x14, 0x01, 0x6a, 0x31, 0xa0, 0x76, 0xf2, 0x93, 0xf2, 0x75, 0x7c, 0x39, 0x0e, 0xa8, 0xe1, 0x97,
+	0x6f, 0xf5, 0x79, 0x9f, 0xeb, 0xe6, 0xa6, 0x6a, 0x19, 0xeb, 0x5a, 0x9f, 0xf3, 0xbe, 0x47, 0x37,
+	0x75, 0xef, 0x70, 0x74, 0xb4, 0xe9, 0x8e, 0x04, 0x91, 0x8c, 0xfb, 0xe6, 0xfb, 0xea, 0xe5, 0xef,
+	0xc4, 0x1f, 0x5f, 0x45, 0x7d, 0x25, 0x48, 0x10, 0x50, 0x61, 0x06, 0xac, 0x9e, 0x65, 0x21, 0xbf,
+	0xcb, 0x5d, 0xda, 0x0d, 0xa8, 0x83, 0x76, 0xc0, 0x22, 0xbe, 0xcf, 0xa5, 0xf6, 0x1d, 0xda, 0x99,
+	0x4a, 0x66, 0xc3, 0xda, 0x5a, 0xaf, 0xbd, 0xb9, 0xa6, 0x5a, 0x7d, 0x02, 0x6b, 0x64, 0x5f, 0x9f,
+	0xaf, 0xcf, 0xe1, 0x69, 0x26, 0xfa, 0x25, 0x14, 0x5d, 0x1a, 0x32, 0x41, 0xdd, 0x9e, 0xe0, 0x1e,
+	0xb5, 0xe7, 0x2b, 0x99, 0x8d, 0x1b, 0x5b, 0x77, 0xd3, 0x3c, 0xa9, 0xc1, 0x31, 0xf7, 0x28, 0xb6,
+	0x0c, 0x43, 0x75, 0xd0, 0x0e, 0xc0, 0x90, 0x0e, 0x0f, 0xa9, 0x08, 0x07, 0x2c, 0xb0, 0x17, 0x34,
+	0xfd, 0x47, 0x57, 0xd1, 0xd5, 0xdc, 0x6b, 0xcf, 0x13, 0x38, 0x9e, 0xa2, 0xa2, 0xe7, 0x50, 0x24,
+	0x27, 0x84, 0x79, 0xe4, 0x90, 0x79, 0x4c, 0x8e, 0xed, 0xac, 0x76, 0xf5, 0xe9, 0x5b, 0x5d, 0xd5,
+	0xa7, 0x08, 0x78, 0x86, 0x5e, 0x75, 0x01, 0x26, 0x03, 0xa1, 0x87, 0xb0, 0xd4, 0x69, 0xed, 0x36,
+	0xdb, 0xbb, 0x3b, 0xa5, 0xb9, 0xf2, 0xea, 0xe9, 0x59, 0xe5, 0x43, 0xe5, 0x63, 0x02, 0xe8, 0x50,
+	0xdf, 0x65, 0x7e, 0x1f, 0x6d, 0x40, 0xbe, 0xbe, 0xbd, 0xdd, 0xea, 0xec, 0xb7, 0x9a, 0xa5, 0x4c,
+	0xb9, 0x7c, 0x7a, 0x56, 0xb9, 0x3d, 0x0b, 0xac, 0x3b, 0x0e, 0x0d, 0x24, 0x75, 0xcb, 0xd9, 0xef,
+	0xfe, 0xbc, 0x36, 0x57, 0xfd, 0x2e, 0x03, 0xc5, 0xe9, 0x49, 0xa0, 0x87, 0x90, 0xab, 0x6f, 0xef,
+	0xb7, 0x0f, 0x5a, 0xa5, 0xb9, 0x09, 0x7d, 0x1a, 0x51, 0x77, 0x24, 0x3b, 0xa1, 0xe8, 0x01, 0x2c,
+	0x76, 0xea, 0x2f, 0xba, 0xad, 0x52, 0x66, 0x32, 0x9d, 0x69, 0x58, 0x87, 0x8c, 0x42, 0x8d, 0x6a,
+	0xe2, 0x7a, 0x7b, 0xb7, 0x34, 0x9f, 0x8e, 0x6a, 0x0a, 0xc2, 0x7c, 0x33, 0x95, 0x3f, 0x65, 0xc1,
+	0xea, 0x52, 0x71, 0xc2, 0x9c, 0xf7, 0x2c, 0x91, 0x2f, 0x21, 0x2b, 0x49, 0x78, 0xac, 0xa5, 0x61,
+	0xa5, 0x4b, 0x63, 0x9f, 0x84, 0xc7, 0x6a, 0x50, 0x43, 0xd7, 0x78, 0xa5, 0x0c, 0x41, 0x03, 0x8f,
+	0x39, 0x44, 0x52, 0x57, 0x2b, 0xc3, 0xda, 0xfa, 0x24, 0x8d, 0x8d, 0x13, 0x94, 0x99, 0xff, 0x93,
+	0x39, 0x3c, 0x45, 0x45, 0x8f, 0x21, 0xd7, 0xf7, 0xf8, 0x21, 0xf1, 0xb4, 0x26, 0xac, 0xad, 0xfb,
+	0x69, 0x4e, 0x76, 0x34, 0x62, 0xe2, 0xc0, 0x50, 0xd0, 0x23, 0xc8, 0x8d, 0x02, 0x97, 0x48, 0x6a,
+	0xe7, 0x34, 0xb9, 0x92, 0x46, 0x7e, 0xa1, 0x11, 0xdb, 0xdc, 0x3f, 0x62, 0x7d, 0x6c, 0xf0, 0xe8,
+	0x29, 0xe4, 0x7d, 0x2a, 0x5f, 0x71, 0x71, 0x1c, 0xda, 0x4b, 0x95, 0x85, 0x0d, 0x6b, 0xeb, 0xf3,
+	0x54, 0x31, 0x46, 0x98, 0xba, 0x94, 0xc4, 0x19, 0x0c, 0xa9, 0x2f, 0x23, 0x37, 0x8d, 0x79, 0x3b,
+	0x83, 0x13, 0x07, 0xe8, 0xe7, 0x90, 0xa7, 0xbe, 0x1b, 0x70, 0xe6, 0x4b, 0x3b, 0x7f, 0xf5, 0x44,
+	0x5a, 0x06, 0xa3, 0x36, 0x13, 0x27, 0x0c, 0xc5, 0x16, 0xdc, 0xf3, 0x0e, 0x89, 0x73, 0x6c, 0x17,
+	0xde, 0x71, 0x19, 0x09, 0xa3, 0x91, 0x83, 0xec, 0x90, 0xbb, 0xb4, 0xba, 0x09, 0x2b, 0x6f, 0x6c,
+	0x35, 0x2a, 0x43, 0xde, 0x6c, 0x75, 0xa4, 0x91, 0x2c, 0x4e, 0xfa, 0xd5, 0x9b, 0xb0, 0x3c, 0xb3,
+	0xad, 0xd5, 0xbf, 0x2e, 0x42, 0x3e, 0x3e, 0x6b, 0x54, 0x87, 0x82, 0xc3, 0x7d, 0x49, 0x98, 0x4f,
+	0x85, 0x91, 0x57, 0xea, 0xc9, 0x6c, 0xc7, 0x20, 0xc5, 0x7a, 0x32, 0x87, 0x27, 0x2c, 0xf4, 0x2d,
+	0x14, 0x04, 0x0d, 0xf9, 0x48, 0x38, 0x34, 0x34, 0xfa, 0xda, 0x48, 0x57, 0x48, 0x04, 0xc2, 0xf4,
+	0xf7, 0x23, 0x26, 0xa8, 0xda, 0xe5, 0x10, 0x4f, 0xa8, 0xe8, 0x31, 0x2c, 0x09, 0x1a, 0x4a, 0x22,
+	0xe4, 0xdb, 0x24, 0x82, 0x23, 0x48, 0x87, 0x7b, 0xcc, 0x19, 0xe3, 0x98, 0x81, 0x1e, 0x43, 0x21,
+	0xf0, 0x88, 0xa3, 0xbd, 0xda, 0x8b, 0x9a, 0x7e, 0x2f, 0x8d, 0xde, 0x89, 0x41, 0x78, 0x82, 0x47,
+	0x5f, 0x01, 0x78, 0xbc, 0xdf, 0x73, 0x05, 0x3b, 0xa1, 0xc2, 0x48, 0xac, 0x9c, 0xc6, 0x6e, 0x6a,
+	0x04, 0x2e, 0x78, 0xbc, 0x1f, 0x35, 0xd1, 0xce, 0xff, 0xa4, 0xaf, 0x29, 0x6d, 0x3d, 0x05, 0x20,
+	0xc9, 0x57, 0xa3, 0xae, 0x4f, 0xdf, 0xc9, 0x95, 0x39, 0x91, 0x29, 0x3a, 0xba, 0x0f, 0xc5, 0x23,
+	0x2e, 0x1c, 0xda, 0x33, 0xb7, 0xa6, 0xa0, 0x35, 0x61, 0x69, 0x5b, 0xa4, 0x2f, 0xd4, 0x80, 0xa5,
+	0x3e, 0xf5, 0xa9, 0x60, 0x8e, 0x0d, 0x7a, 0xb0, 0x87, 0xa9, 0x17, 0x32, 0x82, 0xe0, 0x91, 0x2f,
+	0xd9, 0x90, 0x9a, 0x91, 0x62, 0x22, 0xfa, 0x1d, 0x7c, 0x10, 0x1f, 0x5f, 0x4f, 0xd0, 0x23, 0x2a,
+	0xa8, 0xaf, 0x34, 0x60, 0xe9, 0x7d, 0xf8, 0xe4, 0xed, 0x1a, 0x30, 0x68, 0x13, 0x6c, 0x90, 0xb8,
+	0xfc, 0x21, 0x6c, 0x14, 0x60, 0x49, 0x44, 0xe3, 0x56, 0xff, 0x98, 0x51, 0xaa, 0xbf, 0x84, 0x40,
+	0x9b, 0x60, 0x25, 0xc3, 0x33, 0x57, 0xab, 0xb7, 0xd0, 0xb8, 0x71, 0x71, 0xbe, 0x0e, 0x31, 0xb6,
+	0xdd, 0x54, 0x31, 0xc8, 0xb4, 0x5d, 0xd4, 0x82, 0xe5, 0x84, 0xa0, 0xca, 0x00, 0x93, 0x28, 0x2b,
+	0x6f, 0x9b, 0xe9, 0xfe, 0x38, 0xa0, 0xb8, 0x28, 0xa6, 0x7a, 0xd5, 0xdf, 0x02, 0x7a, 0x73, 0x5f,
+	0x10, 0x82, 0xec, 0x31, 0xf3, 0xcd, 0x34, 0xb0, 0x6e, 0xa3, 0x1a, 0x2c, 0x05, 0x64, 0xec, 0x71,
+	0xe2, 0x9a, 0x8b, 0x71, 0xab, 0x16, 0x15, 0x08, 0xb5, 0xb8, 0x40, 0xa8, 0xd5, 0xfd, 0x31, 0x8e,
+	0x41, 0xd5, 0xa7, 0xf0, 0x61, 0xea, 0xf1, 0xa2, 0x2d, 0x28, 0x26, 0x17, 0x6e, 0xb2, 0xd6, 0x9b,
+	0x17, 0xe7, 0xeb, 0x56, 0x72, 0x33, 0xdb, 0x4d, 0x6c, 0x25, 0xa0, 0xb6, 0x5b, 0xfd, 0xde, 0x82,
+	0xe5, 0x99, 0x6b, 0x8b, 0x6e, 0xc1, 0x22, 0x1b, 0x92, 0x3e, 0x35, 0x73, 0x8c, 0x3a, 0xa8, 0x05,
+	0x39, 0x8f, 0x1c, 0x52, 0x4f, 0x5d, 0x5e, 0x75, 0x70, 0x3f, 0xbe, 0xf6, 0xfe, 0xd7, 0x9e, 0x69,
+	0x7c, 0xcb, 0x97, 0x62, 0x8c, 0x0d, 0x19, 0xd9, 0xb0, 0xe4, 0xf0, 0xe1, 0x90, 0xf8, 0x2a, 0x4d,
+	0x2c, 0x6c, 0x14, 0x70, 0xdc, 0x55, 0x3b, 0x43, 0x44, 0x3f, 0xb4, 0xb3, 0xda, 0xac, 0xdb, 0xa8,
+	0x04, 0x0b, 0xd4, 0x3f, 0xb1, 0x17, 0xb5, 0x49, 0x35, 0x95, 0xc5, 0x65, 0xd1, 0xed, 0x2b, 0x60,
+	0xd5, 0x54, 0xbc, 0x51, 0x48, 0x85, 0xbd, 0x14, 0xed, 0xa8, 0x6a, 0xa3, 0x9f, 0x41, 0x6e, 0xc8,
+	0x47, 0xbe, 0x0c, 0xed, 0xbc, 0x9e, 0xec, 0x6a, 0xda, 0x64, 0x9f, 0x2b, 0x84, 0x51, 0x96, 0x81,
+	0xa3, 0x16, 0xac, 0x84, 0x92, 0x07, 0xbd, 0xbe, 0x20, 0x0e, 0xed, 0x05, 0x54, 0x30, 0xee, 0x9a,
+	0x30, 0xbc, 0xfa, 0xc6, 0xa1, 0x34, 0x4d, 0xc1, 0x87, 0x6f, 0x2a, 0xce, 0x8e, 0xa2, 0x74, 0x34,
+	0x03, 0x75, 0xa0, 0x18, 0x8c, 0x3c, 0xaf, 0xc7, 0x83, 0x28, 0x23, 0x47, 0x77, 0xe7, 0x1d, 0xb6,
+	0xac, 0x33, 0xf2, 0xbc, 0xbd, 0x88, 0x84, 0xad, 0x60, 0xd2, 0x41, 0xb7, 0x21, 0xd7, 0x17, 0x7c,
+	0x14, 0x44, 0xf7, 0xa6, 0x80, 0x4d, 0x0f, 0x7d, 0x03, 0x4b, 0x21, 0x75, 0x04, 0x95, 0xa1, 0x5d,
+	0xd4, 0x4b, 0xfd, 0x38, 0x6d, 0x90, 0xae, 0x86, 0x24, 0x77, 0x02, 0xc7, 0x1c, 0xb4, 0x0a, 0x0b,
+	0x52, 0x8e, 0xed, 0xe5, 0x4a, 0x66, 0x23, 0xdf, 0x58, 0xba, 0x38, 0x5f, 0x5f, 0xd8, 0xdf, 0x7f,
+	0x89, 0x95, 0x4d, 0x65, 0x8b, 0x01, 0x0f, 0xa5, 0x4f, 0x86, 0xd4, 0xbe, 0xa1, 0xf7, 0x36, 0xe9,
+	0xa3, 0x97, 0x00, 0xae, 0x1f, 0xf6, 0x1c, 0x1d, 0x9e, 0xec, 0x9b, 0x7a, 0x75, 0x9f, 0x5f, 0xbf,
+	0xba, 0xe6, 0x6e, 0xd7, 0x64, 0xcc, 0xe5, 0x8b, 0xf3, 0xf5, 0x42, 0xd2, 0xc5, 0x05, 0xd7, 0x0f,
+	0xa3, 0x26, 0x6a, 0x80, 0x35, 0xa0, 0xc4, 0x93, 0x03, 0x67, 0x40, 0x9d, 0x63, 0xbb, 0x74, 0x75,
+	0x0a, 0x7c, 0xa2, 0x61, 0xc6, 0xc3, 0x34, 0x49, 0x29, 0x58, 0x4d, 0x35, 0xb4, 0x57, 0xf4, 0x5e,
+	0x45, 0x1d, 0x74, 0x0f, 0x80, 0x07, 0xd4, 0xef, 0x85, 0xd2, 0x65, 0xbe, 0x8d, 0xd4, 0x92, 0x71,
+	0x41, 0x59, 0xba, 0xca, 0x80, 0xee, 0xa8, 0x04, 0x45, 0xdc, 0x1e, 0xf7, 0xbd, 0xb1, 0xfd, 0x81,
+	0xfe, 0x9a, 0x57, 0x86, 0x3d, 0xdf, 0x1b, 0xa3, 0x75, 0xb0, 0xb4, 0x2e, 0x42, 0xd6, 0xf7, 0x89,
+	0x67, 0xdf, 0xd2, 0xfb, 0x01, 0xca, 0xd4, 0xd5, 0x16, 0x75, 0x0e, 0xd1, 0x6e, 0x84, 0xf6, 0x87,
+	0x57, 0x9f, 0x83, 0x99, 0xec, 0xe4, 0x1c, 0x0c, 0x07, 0xfd, 0x02, 0x20, 0x10, 0xec, 0x84, 0x79,
+	0xb4, 0x4f, 0x43, 0xfb, 0xb6, 0x5e, 0xf4, 0x5a, 0x6a, 0x66, 0x4a, 0x50, 0x78, 0x8a, 0x81, 0x6a,
+	0x90, 0x65, 0x3e, 0x93, 0xf6, 0x47, 0x26, 0x2b, 0x5d, 0x96, 0x6a, 0x83, 0x73, 0xef, 0x80, 0x78,
+	0x23, 0x8a, 0x35, 0x0e, 0xb5, 0xa1, 0xc0, 0x42, 0xee, 0x69, 0xf9, 0xda, 0xb6, 0x8e, 0x6f, 0xef,
+	0x70, 0x7e, 0xed, 0x98, 0x82, 0x27, 0x6c, 0x74, 0x17, 0x0a, 0x01, 0x73, 0xc3, 0x67, 0x6c, 0xc8,
+	0xa4, 0xbd, 0x5a, 0xc9, 0x6c, 0x2c, 0xe0, 0x89, 0xa1, 0xfc, 0x15, 0x58, 0x53, 0x61, 0x40, 0x5d,
+	0xdf, 0x63, 0x3a, 0x36, 0x91, 0x45, 0x35, 0xd5, 0x59, 0x9d, 0xa8, 0x89, 0xe9, 0xd0, 0x57, 0xc0,
+	0x51, 0xe7, 0xeb, 0xf9, 0x47, 0x99, 0xf2, 0x16, 0x58, 0x53, 0xd7, 0x01, 0x7d, 0xac, 0xc2, 0x72,
+	0x9f, 0x85, 0x52, 0x8c, 0x7b, 0x64, 0x24, 0x07, 0xf6, 0xaf, 0x34, 0xa1, 0x18, 0x1b, 0xeb, 0x23,
+	0x39, 0x28, 0xf7, 0x60, 0xa2, 0x2a, 0x54, 0x01, 0x4b, 0xa9, 0x35, 0xa4, 0xe2, 0x84, 0x0a, 0x55,
+	0xf2, 0x28, 0x31, 0x4c, 0x9b, 0xd4, 0xad, 0x0a, 0x29, 0x11, 0xce, 0x40, 0x07, 0xb5, 0x02, 0x36,
+	0x3d, 0x15, 0xa5, 0xe2, 0xab, 0x6b, 0xa2, 0x94, 0xe9, 0x56, 0xff, 0x96, 0x81, 0x42, 0xb2, 0x0d,
+	0xe8, 0x0b, 0x58, 0x69, 0x77, 0xf7, 0x9e, 0xd5, 0xf7, 0xdb, 0x7b, 0xbb, 0xbd, 0x66, 0xeb, 0xdb,
+	0xfa, 0x8b, 0x67, 0xfb, 0xa5, 0xb9, 0xf2, 0xbd, 0xd3, 0xb3, 0xca, 0xea, 0x24, 0xe2, 0xc6, 0xf0,
+	0x26, 0x3d, 0x22, 0x23, 0x4f, 0xce, 0xb2, 0x3a, 0x78, 0x6f, 0xbb, 0xd5, 0xed, 0x96, 0x32, 0x57,
+	0xb1, 0x3a, 0x82, 0x3b, 0x34, 0x0c, 0xd1, 0x16, 0x94, 0x26, 0xac, 0x27, 0x2f, 0x3b, 0x2d, 0x7c,
+	0x50, 0x9a, 0x2f, 0xdf, 0x3d, 0x3d, 0xab, 0xd8, 0x6f, 0x92, 0x9e, 0x8c, 0x03, 0x2a, 0x0e, 0xcc,
+	0x73, 0xe1, 0x5f, 0x19, 0x28, 0x4e, 0x57, 0x9b, 0x68, 0x3b, 0xaa, 0x12, 0xf5, 0x31, 0xdc, 0xd8,
+	0xda, 0xbc, 0xae, 0x3a, 0xd5, 0x59, 0xce, 0x1b, 0x29, 0xbf, 0xcf, 0xd5, 0xc3, 0x50, 0x93, 0xd1,
+	0x17, 0xb0, 0x18, 0x70, 0x21, 0xe3, 0x7c, 0x90, 0xae, 0x56, 0x2e, 0xe2, 0x1a, 0x26, 0x02, 0x57,
+	0x07, 0x70, 0x63, 0xd6, 0x1b, 0x7a, 0x00, 0x0b, 0x07, 0xed, 0x4e, 0x69, 0xae, 0x7c, 0xe7, 0xf4,
+	0xac, 0xf2, 0xd1, 0xec, 0xc7, 0x03, 0x26, 0xe4, 0x88, 0x78, 0xed, 0x0e, 0xfa, 0x0c, 0x16, 0x9b,
+	0xbb, 0x5d, 0x8c, 0x4b, 0x99, 0xf2, 0xfa, 0xe9, 0x59, 0xe5, 0xce, 0x2c, 0x4e, 0x7d, 0xe2, 0x23,
+	0xdf, 0xc5, 0xfc, 0x30, 0x79, 0x24, 0xfd, 0x7b, 0x1e, 0x2c, 0x93, 0x26, 0xdf, 0xf7, 0x3b, 0x7a,
+	0x39, 0xaa, 0x01, 0xe3, 0xf8, 0x37, 0x7f, 0x6d, 0x29, 0x58, 0x8c, 0x08, 0x46, 0x97, 0xf7, 0xa1,
+	0xc8, 0x82, 0x93, 0x2f, 0x7b, 0xd4, 0x27, 0x87, 0x9e, 0x79, 0x2f, 0xe5, 0xb1, 0xa5, 0x6c, 0xad,
+	0xc8, 0xa4, 0x82, 0x2f, 0xf3, 0x25, 0x15, 0xbe, 0x79, 0x09, 0xe5, 0x71, 0xd2, 0x47, 0xdf, 0x40,
+	0x96, 0x05, 0x64, 0x68, 0xea, 0xd7, 0xd4, 0x15, 0xb4, 0x3b, 0xf5, 0xe7, 0xe6, 0xde, 0x34, 0xf2,
+	0x17, 0xe7, 0xeb, 0x59, 0x65, 0xc0, 0x9a, 0x86, 0xd6, 0xe2, 0x12, 0x52, 0x8d, 0xa4, 0x13, 0x69,
+	0x1e, 0x4f, 0x59, 0x94, 0xf6, 0x99, 0xdf, 0x17, 0x34, 0x0c, 0x75, 0x4a, 0xcd, 0xe3, 0xb8, 0x8b,
+	0xca, 0xb0, 0x64, 0x0a, 0x51, 0x5d, 0x79, 0x16, 0x54, 0x91, 0x67, 0x0c, 0x8d, 0x65, 0xb0, 0xa2,
+	0xdd, 0xe8, 0x1d, 0x09, 0x3e, 0xac, 0xfe, 0x27, 0x0b, 0xd6, 0xb6, 0x37, 0x0a, 0xa5, 0xa9, 0x29,
+	0xde, 0xdb, 0xe6, 0xbf, 0x84, 0x15, 0xa2, 0xdf, 0xe5, 0xc4, 0x57, 0x09, 0x5a, 0xd7, 0xf7, 0xe6,
+	0x00, 0x1e, 0xa4, 0xba, 0x4b, 0xc0, 0xd1, 0x5b, 0xa0, 0x91, 0x53, 0x3e, 0xed, 0x0c, 0x2e, 0x91,
+	0x4b, 0x5f, 0x50, 0x17, 0x96, 0xb9, 0x70, 0x06, 0x34, 0x94, 0x51, 0x5a, 0x37, 0xef, 0xd8, 0xd4,
+	0x3f, 0x1c, 0x7b, 0xd3, 0x40, 0x93, 0xd3, 0xa2, 0xd9, 0xce, 0xfa, 0x40, 0x8f, 0x20, 0x2b, 0xc8,
+	0x51, 0xfc, 0x56, 0x49, 0xbd, 0x24, 0x98, 0x1c, 0xc9, 0x19, 0x17, 0x9a, 0x81, 0x7e, 0x0d, 0xe0,
+	0xb2, 0x30, 0x20, 0xd2, 0x19, 0x50, 0x61, 0x0e, 0x3b, 0x75, 0x89, 0xcd, 0x04, 0x35, 0xe3, 0x65,
+	0x8a, 0x8d, 0x9e, 0x42, 0xc1, 0x21, 0xb1, 0x5c, 0x73, 0x57, 0x3f, 0xee, 0xb7, 0xeb, 0xc6, 0x45,
+	0x49, 0xb9, 0xb8, 0x38, 0x5f, 0xcf, 0xc7, 0x16, 0x9c, 0x77, 0x88, 0x91, 0xef, 0x53, 0x58, 0x56,
+	0x8f, 0xfe, 0x9e, 0x1b, 0x85, 0xb3, 0x48, 0x26, 0x57, 0xe4, 0x68, 0xf5, 0x82, 0x34, 0x61, 0x2f,
+	0x3e, 0xce, 0xa2, 0x9c, 0xb2, 0xa1, 0xdf, 0xc0, 0x0a, 0xf5, 0x1d, 0x31, 0xd6, 0x62, 0x8d, 0x67,
+	0x98, 0xbf, 0x7a, 0xb1, 0xad, 0x04, 0x3c, 0xb3, 0xd8, 0x12, 0xbd, 0x64, 0xaf, 0xfe, 0x33, 0x03,
+	0x10, 0x95, 0x3d, 0xef, 0x57, 0x80, 0x08, 0xb2, 0x2e, 0x91, 0x44, 0x6b, 0xae, 0x88, 0x75, 0x1b,
+	0x7d, 0x0d, 0x20, 0xe9, 0x30, 0x50, 0xa1, 0xd7, 0xef, 0x1b, 0xd9, 0xbc, 0x2d, 0x1c, 0x4c, 0xa1,
+	0xd1, 0x16, 0xe4, 0xcc, 0x8b, 0x32, 0x7b, 0x2d, 0xcf, 0x20, 0xab, 0x7f, 0xc9, 0x00, 0x44, 0xcb,
+	0xfc, 0xbf, 0x5e, 0x5b, 0xc3, 0x7e, 0xfd, 0xc3, 0xda, 0xdc, 0x3f, 0x7e, 0x58, 0x9b, 0xfb, 0xc3,
+	0xc5, 0x5a, 0xe6, 0xf5, 0xc5, 0x5a, 0xe6, 0xef, 0x17, 0x6b, 0x99, 0xef, 0x2f, 0xd6, 0x32, 0x87,
+	0x39, 0x5d, 0x99, 0xfc, 0xf4, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb8, 0xa3, 0x85, 0xdc, 0xc7,
+	0x15, 0x00, 0x00,
 }
diff --git a/vendor/github.com/docker/swarmkit/api/specs.proto b/vendor/github.com/docker/swarmkit/api/specs.proto
index c7c365b..14448d0 100644
--- a/vendor/github.com/docker/swarmkit/api/specs.proto
+++ b/vendor/github.com/docker/swarmkit/api/specs.proto
@@ -6,6 +6,7 @@
 import "gogoproto/gogo.proto";
 import "google/protobuf/duration.proto";
 import "google/protobuf/any.proto";
+import "google/protobuf/wrappers.proto";
 
 // Specs are container objects for user provided input. All creations and
 // updates are done through spec types. As a convention, user input from a spec
@@ -215,6 +216,9 @@
 	// Privileges specifies security configuration/permissions.
 	Privileges privileges = 22;
 
+	// Init declares that a custom init will be running inside the container, if null, use the daemon's configured settings
+	google.protobuf.BoolValue init = 23;
+
 	// TTY declares that a TTY should be attached to the standard streams,
 	// including stdin if it is still open.
 	bool tty = 13 [(gogoproto.customname) = "TTY"];
@@ -293,11 +297,27 @@
 	// task will exit and a new task will be rescheduled elsewhere. A container
 	// is considered unhealthy after `Retries` number of consecutive failures.
 	HealthConfig healthcheck = 16;
-	
-	// Run a custom init inside the container, if null, use the daemon's configured settings
-	oneof init {
-		bool init_value = 23;
+
+	enum Isolation {
+		option (gogoproto.goproto_enum_prefix) = false;
+
+		// ISOLATION_DEFAULT uses whatever default value from the container runtime
+		ISOLATION_DEFAULT = 0 [(gogoproto.enumvalue_customname) = "ContainerIsolationDefault"];
+
+		// ISOLATION_PROCESS forces windows container isolation
+		ISOLATION_PROCESS = 1 [(gogoproto.enumvalue_customname) = "ContainerIsolationProcess"];
+
+		// ISOLATION_HYPERV forces Hyper-V isolation
+		ISOLATION_HYPERV = 2 [(gogoproto.enumvalue_customname) = "ContainerIsolationHyperV"];
 	}
+
+	// Isolation defines the isolation level for windows containers (default, process, hyperv).
+	// Runtimes that don't support it ignore that field
+	Isolation isolation = 24;
+
+	// PidsLimit prevents from OS resource damage by applications inside the container 
+	// using fork bomb attack.
+	int64 pidsLimit = 25;
 }
 
 // EndpointSpec defines the properties that can be configured to
diff --git a/vendor/github.com/docker/swarmkit/api/types.pb.go b/vendor/github.com/docker/swarmkit/api/types.pb.go
index 07e1fe0..33e2281 100644
--- a/vendor/github.com/docker/swarmkit/api/types.pb.go
+++ b/vendor/github.com/docker/swarmkit/api/types.pb.go
@@ -219,6 +219,36 @@
 }
 func (Mount_MountType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTypes, []int{16, 0} }
 
+// Consistency indicates the tolerable level of file system consistency
+type Mount_MountConsistency int32
+
+const (
+	MountConsistencyDefault   Mount_MountConsistency = 0
+	MountConsistencyFull      Mount_MountConsistency = 1
+	MountConsistencyCached    Mount_MountConsistency = 2
+	MountConsistencyDelegated Mount_MountConsistency = 3
+)
+
+var Mount_MountConsistency_name = map[int32]string{
+	0: "DEFAULT",
+	1: "CONSISTENT",
+	2: "CACHED",
+	3: "DELEGATED",
+}
+var Mount_MountConsistency_value = map[string]int32{
+	"DEFAULT":    0,
+	"CONSISTENT": 1,
+	"CACHED":     2,
+	"DELEGATED":  3,
+}
+
+func (x Mount_MountConsistency) String() string {
+	return proto.EnumName(Mount_MountConsistency_name, int32(x))
+}
+func (Mount_MountConsistency) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptorTypes, []int{16, 1}
+}
+
 type Mount_BindOptions_MountPropagation int32
 
 const (
@@ -871,7 +901,8 @@
 	// Target path in container
 	Target string `protobuf:"bytes,3,opt,name=target,proto3" json:"target,omitempty"`
 	// ReadOnly should be set to true if the mount should not be writable.
-	ReadOnly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"`
+	ReadOnly    bool                   `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"`
+	Consistency Mount_MountConsistency `protobuf:"varint,8,opt,name=consistency,proto3,enum=docker.swarmkit.v1.Mount_MountConsistency" json:"consistency,omitempty"`
 	// BindOptions configures properties of a bind mount type.
 	//
 	// For mounts of type bind, the source must be an absolute host path.
@@ -1054,12 +1085,17 @@
 	// because the task is prepared, we would put "already prepared" in this
 	// field.
 	Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
-	// Err is set if the task is in an error state.
+	// Err is set if the task is in an error state, or is unable to
+	// progress from an earlier state because a precondition is
+	// unsatisfied.
 	//
 	// The following states should report a companion error:
 	//
 	// 	FAILED, REJECTED
 	//
+	// In general, messages that should be surfaced to users belong in the
+	// Err field, and notes on routine state transitions belong in Message.
+	//
 	// TODO(stevvooe) Integrate this field with the error interface.
 	Err string `protobuf:"bytes,4,opt,name=err,proto3" json:"err,omitempty"`
 	// Container status contains container specific status information.
@@ -2127,6 +2163,7 @@
 	proto.RegisterEnum("docker.swarmkit.v1.RaftMemberStatus_Reachability", RaftMemberStatus_Reachability_name, RaftMemberStatus_Reachability_value)
 	proto.RegisterEnum("docker.swarmkit.v1.NodeStatus_State", NodeStatus_State_name, NodeStatus_State_value)
 	proto.RegisterEnum("docker.swarmkit.v1.Mount_MountType", Mount_MountType_name, Mount_MountType_value)
+	proto.RegisterEnum("docker.swarmkit.v1.Mount_MountConsistency", Mount_MountConsistency_name, Mount_MountConsistency_value)
 	proto.RegisterEnum("docker.swarmkit.v1.Mount_BindOptions_MountPropagation", Mount_BindOptions_MountPropagation_name, Mount_BindOptions_MountPropagation_value)
 	proto.RegisterEnum("docker.swarmkit.v1.RestartPolicy_RestartCondition", RestartPolicy_RestartCondition_name, RestartPolicy_RestartCondition_value)
 	proto.RegisterEnum("docker.swarmkit.v1.UpdateConfig_FailureAction", UpdateConfig_FailureAction_name, UpdateConfig_FailureAction_value)
@@ -4160,6 +4197,11 @@
 		}
 		i += n12
 	}
+	if m.Consistency != 0 {
+		dAtA[i] = 0x40
+		i++
+		i = encodeVarintTypes(dAtA, i, uint64(m.Consistency))
+	}
 	return i, nil
 }
 
@@ -6398,6 +6440,9 @@
 		l = m.TmpfsOptions.Size()
 		n += 1 + l + sovTypes(uint64(l))
 	}
+	if m.Consistency != 0 {
+		n += 1 + sovTypes(uint64(m.Consistency))
+	}
 	return n
 }
 
@@ -7498,6 +7543,7 @@
 		`BindOptions:` + strings.Replace(fmt.Sprintf("%v", this.BindOptions), "Mount_BindOptions", "Mount_BindOptions", 1) + `,`,
 		`VolumeOptions:` + strings.Replace(fmt.Sprintf("%v", this.VolumeOptions), "Mount_VolumeOptions", "Mount_VolumeOptions", 1) + `,`,
 		`TmpfsOptions:` + strings.Replace(fmt.Sprintf("%v", this.TmpfsOptions), "Mount_TmpfsOptions", "Mount_TmpfsOptions", 1) + `,`,
+		`Consistency:` + fmt.Sprintf("%v", this.Consistency) + `,`,
 		`}`,
 	}, "")
 	return s
@@ -10464,6 +10510,25 @@
 				return err
 			}
 			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Consistency", wireType)
+			}
+			m.Consistency = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowTypes
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Consistency |= (Mount_MountConsistency(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
 		default:
 			iNdEx = preIndex
 			skippy, err := skipTypes(dAtA[iNdEx:])
@@ -16964,312 +17029,319 @@
 func init() { proto.RegisterFile("github.com/docker/swarmkit/api/types.proto", fileDescriptorTypes) }
 
 var fileDescriptorTypes = []byte{
-	// 4905 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x7a, 0x4d, 0x6c, 0x24, 0x49,
-	0x56, 0xbf, 0xeb, 0xd3, 0x55, 0xaf, 0xca, 0x76, 0x3a, 0xda, 0xdb, 0xe3, 0xae, 0xed, 0xb1, 0x3d,
-	0x39, 0xd3, 0x3b, 0x33, 0xbd, 0xf3, 0xaf, 0xfe, 0x9a, 0x19, 0xf5, 0xcc, 0xfc, 0xe7, 0xa3, 0xbe,
-	0xdc, 0xae, 0x6d, 0xbb, 0xaa, 0x14, 0x55, 0xee, 0xde, 0x41, 0x82, 0x54, 0x3a, 0x33, 0x5c, 0xce,
-	0x71, 0x56, 0x46, 0x91, 0x99, 0x65, 0x77, 0xb1, 0x20, 0x5a, 0x1c, 0x00, 0xf9, 0x04, 0xb7, 0x45,
-	0xc8, 0x5c, 0xe0, 0x84, 0x90, 0x38, 0x80, 0x84, 0xe0, 0x34, 0x48, 0x1c, 0xe6, 0xc6, 0x02, 0x12,
-	0x5a, 0x81, 0x64, 0x18, 0x1f, 0xb8, 0x21, 0xb8, 0xac, 0xb8, 0x80, 0x84, 0xe2, 0x23, 0xb3, 0xb2,
-	0xaa, 0xd3, 0x76, 0x0f, 0xb3, 0x17, 0x3b, 0xe3, 0xbd, 0xdf, 0x7b, 0xf1, 0xe2, 0x45, 0xc4, 0x8b,
-	0xf7, 0x22, 0x0a, 0x6e, 0xf7, 0x2d, 0xff, 0x60, 0xb4, 0x57, 0x36, 0xe8, 0xe0, 0x8e, 0x49, 0x8d,
-	0x43, 0xe2, 0xde, 0xf1, 0x8e, 0x75, 0x77, 0x70, 0x68, 0xf9, 0x77, 0xf4, 0xa1, 0x75, 0xc7, 0x1f,
-	0x0f, 0x89, 0x57, 0x1e, 0xba, 0xd4, 0xa7, 0x08, 0x09, 0x40, 0x39, 0x00, 0x94, 0x8f, 0xee, 0x95,
-	0xd6, 0xfb, 0x94, 0xf6, 0x6d, 0x72, 0x87, 0x23, 0xf6, 0x46, 0xfb, 0x77, 0x7c, 0x6b, 0x40, 0x3c,
-	0x5f, 0x1f, 0x0c, 0x85, 0x50, 0x69, 0x6d, 0x16, 0x60, 0x8e, 0x5c, 0xdd, 0xb7, 0xa8, 0x23, 0xf9,
-	0x2b, 0x7d, 0xda, 0xa7, 0xfc, 0xf3, 0x0e, 0xfb, 0x12, 0x54, 0x75, 0x1d, 0xe6, 0x9f, 0x10, 0xd7,
-	0xb3, 0xa8, 0x83, 0x56, 0x20, 0x63, 0x39, 0x26, 0x79, 0xb6, 0x9a, 0xd8, 0x48, 0xbc, 0x95, 0xc6,
-	0xa2, 0xa1, 0xde, 0x05, 0x68, 0xb2, 0x8f, 0x86, 0xe3, 0xbb, 0x63, 0xa4, 0x40, 0xea, 0x90, 0x8c,
-	0x39, 0x22, 0x8f, 0xd9, 0x27, 0xa3, 0x1c, 0xe9, 0xf6, 0x6a, 0x52, 0x50, 0x8e, 0x74, 0x5b, 0xfd,
-	0x3a, 0x01, 0x85, 0x8a, 0xe3, 0x50, 0x9f, 0xf7, 0xee, 0x21, 0x04, 0x69, 0x47, 0x1f, 0x10, 0x29,
-	0xc4, 0xbf, 0x51, 0x0d, 0xb2, 0xb6, 0xbe, 0x47, 0x6c, 0x6f, 0x35, 0xb9, 0x91, 0x7a, 0xab, 0x70,
-	0xff, 0xfb, 0xe5, 0x17, 0x87, 0x5c, 0x8e, 0x28, 0x29, 0x6f, 0x73, 0x34, 0x37, 0x02, 0x4b, 0x51,
-	0xf4, 0x09, 0xcc, 0x5b, 0x8e, 0x69, 0x19, 0xc4, 0x5b, 0x4d, 0x73, 0x2d, 0x6b, 0x71, 0x5a, 0x26,
-	0xd6, 0x57, 0xd3, 0x5f, 0x9d, 0xad, 0xcf, 0xe1, 0x40, 0xa8, 0xf4, 0x01, 0x14, 0x22, 0x6a, 0x63,
-	0xc6, 0xb6, 0x02, 0x99, 0x23, 0xdd, 0x1e, 0x11, 0x39, 0x3a, 0xd1, 0xf8, 0x30, 0xf9, 0x30, 0xa1,
-	0x7e, 0x06, 0x2b, 0x2d, 0x7d, 0x40, 0xcc, 0x47, 0xc4, 0x21, 0xae, 0x65, 0x60, 0xe2, 0xd1, 0x91,
-	0x6b, 0x10, 0x36, 0xd6, 0x43, 0xcb, 0x31, 0x83, 0xb1, 0xb2, 0xef, 0x78, 0x2d, 0x6a, 0x0d, 0x5e,
-	0xa9, 0x5b, 0x9e, 0xe1, 0x12, 0x9f, 0x7c, 0x63, 0x25, 0xa9, 0x40, 0xc9, 0x59, 0x02, 0x96, 0x66,
-	0xa5, 0x7f, 0x01, 0xae, 0x31, 0x17, 0x9b, 0x9a, 0x2b, 0x29, 0x9a, 0x37, 0x24, 0x06, 0x57, 0x56,
-	0xb8, 0xff, 0x56, 0x9c, 0x87, 0xe2, 0x46, 0xb2, 0x35, 0x87, 0x97, 0xb9, 0x9a, 0x80, 0xd0, 0x1d,
-	0x12, 0x03, 0x19, 0x70, 0xdd, 0x94, 0x46, 0xcf, 0xa8, 0x4f, 0x72, 0xf5, 0xb1, 0xd3, 0x78, 0xc1,
-	0x30, 0xb7, 0xe6, 0xf0, 0x4a, 0xa0, 0x2c, 0xda, 0x49, 0x15, 0x20, 0x17, 0xe8, 0x56, 0x7f, 0x9c,
-	0x80, 0x7c, 0xc0, 0xf4, 0xd0, 0xdb, 0x90, 0x77, 0x74, 0x87, 0x6a, 0xc6, 0x70, 0xe4, 0xf1, 0x01,
-	0xa5, 0xaa, 0xc5, 0xf3, 0xb3, 0xf5, 0x5c, 0x4b, 0x77, 0x68, 0xad, 0xb3, 0xeb, 0xe1, 0x1c, 0x63,
-	0xd7, 0x86, 0x23, 0x0f, 0xbd, 0x06, 0xc5, 0x01, 0x19, 0x50, 0x77, 0xac, 0xed, 0x8d, 0x7d, 0xe2,
-	0x49, 0xb7, 0x15, 0x04, 0xad, 0xca, 0x48, 0xe8, 0x63, 0x98, 0xef, 0x0b, 0x93, 0x56, 0x53, 0x7c,
-	0xf9, 0xbc, 0x1e, 0x67, 0xfd, 0x8c, 0xd5, 0x38, 0x90, 0x51, 0x7f, 0x27, 0x01, 0x2b, 0x21, 0x95,
-	0xfc, 0xf2, 0xc8, 0x72, 0xc9, 0x80, 0x38, 0xbe, 0x87, 0xde, 0x83, 0xac, 0x6d, 0x0d, 0x2c, 0xdf,
-	0x93, 0x3e, 0x7f, 0x35, 0x4e, 0x6d, 0x38, 0x28, 0x2c, 0xc1, 0xa8, 0x02, 0x45, 0x97, 0x78, 0xc4,
-	0x3d, 0x12, 0x2b, 0x5e, 0x7a, 0xf4, 0x0a, 0xe1, 0x29, 0x11, 0x75, 0x13, 0x72, 0x1d, 0x5b, 0xf7,
-	0xf7, 0xa9, 0x3b, 0x40, 0x2a, 0x14, 0x75, 0xd7, 0x38, 0xb0, 0x7c, 0x62, 0xf8, 0x23, 0x37, 0xd8,
-	0x7d, 0x53, 0x34, 0x74, 0x1d, 0x92, 0x54, 0x74, 0x94, 0xaf, 0x66, 0xcf, 0xcf, 0xd6, 0x93, 0xed,
-	0x2e, 0x4e, 0x52, 0x4f, 0xfd, 0x08, 0x96, 0x3b, 0xf6, 0xa8, 0x6f, 0x39, 0x75, 0xe2, 0x19, 0xae,
-	0x35, 0x64, 0xda, 0xd9, 0xaa, 0x64, 0x31, 0x2a, 0x58, 0x95, 0xec, 0x3b, 0xdc, 0xda, 0xc9, 0xc9,
-	0xd6, 0x56, 0x7f, 0x2b, 0x09, 0xcb, 0x0d, 0xa7, 0x6f, 0x39, 0x24, 0x2a, 0x7d, 0x0b, 0x16, 0x09,
-	0x27, 0x6a, 0x47, 0x22, 0xdc, 0x48, 0x3d, 0x0b, 0x82, 0x1a, 0xc4, 0xa0, 0xe6, 0x4c, 0x5c, 0xb8,
-	0x17, 0x37, 0xfc, 0x17, 0xb4, 0xc7, 0x46, 0x87, 0x06, 0xcc, 0x0f, 0xf9, 0x20, 0x3c, 0x39, 0xbd,
-	0xb7, 0xe2, 0x74, 0xbd, 0x30, 0xce, 0x20, 0x48, 0x48, 0xd9, 0x6f, 0x13, 0x24, 0xfe, 0x24, 0x09,
-	0x4b, 0x2d, 0x6a, 0x4e, 0xf9, 0xa1, 0x04, 0xb9, 0x03, 0xea, 0xf9, 0x91, 0x80, 0x18, 0xb6, 0xd1,
-	0x43, 0xc8, 0x0d, 0xe5, 0xf4, 0xc9, 0xd9, 0xbf, 0x19, 0x6f, 0xb2, 0xc0, 0xe0, 0x10, 0x8d, 0x3e,
-	0x82, 0x7c, 0xb0, 0x65, 0xd8, 0x68, 0x5f, 0x62, 0xe1, 0x4c, 0xf0, 0xe8, 0x63, 0xc8, 0x8a, 0x49,
-	0x58, 0x4d, 0x73, 0xc9, 0x5b, 0x2f, 0xe5, 0x73, 0x2c, 0x85, 0xd0, 0x23, 0xc8, 0xf9, 0xb6, 0xa7,
-	0x59, 0xce, 0x3e, 0x5d, 0xcd, 0x70, 0x05, 0xeb, 0xb1, 0x41, 0x86, 0x9a, 0xa4, 0xb7, 0xdd, 0x6d,
-	0x3a, 0xfb, 0xb4, 0x5a, 0x38, 0x3f, 0x5b, 0x9f, 0x97, 0x0d, 0x3c, 0xef, 0xdb, 0x1e, 0xfb, 0x50,
-	0x7f, 0x37, 0x01, 0x85, 0x08, 0x0a, 0xbd, 0x0a, 0xe0, 0xbb, 0x23, 0xcf, 0xd7, 0x5c, 0x4a, 0x7d,
-	0xee, 0xac, 0x22, 0xce, 0x73, 0x0a, 0xa6, 0xd4, 0x47, 0x65, 0xb8, 0x66, 0x10, 0xd7, 0xd7, 0x2c,
-	0xcf, 0x1b, 0x11, 0x57, 0xf3, 0x46, 0x7b, 0x5f, 0x10, 0xc3, 0xe7, 0x8e, 0x2b, 0xe2, 0x65, 0xc6,
-	0x6a, 0x72, 0x4e, 0x57, 0x30, 0xd0, 0x03, 0xb8, 0x1e, 0xc5, 0x0f, 0x47, 0x7b, 0xb6, 0x65, 0x68,
-	0x6c, 0x32, 0x53, 0x5c, 0xe4, 0xda, 0x44, 0xa4, 0xc3, 0x79, 0x8f, 0xc9, 0x58, 0xfd, 0x69, 0x02,
-	0x14, 0xac, 0xef, 0xfb, 0x3b, 0x64, 0xb0, 0x47, 0xdc, 0xae, 0xaf, 0xfb, 0x23, 0x0f, 0x5d, 0x87,
-	0xac, 0x4d, 0x74, 0x93, 0xb8, 0xdc, 0xa8, 0x1c, 0x96, 0x2d, 0xb4, 0xcb, 0x76, 0xb0, 0x6e, 0x1c,
-	0xe8, 0x7b, 0x96, 0x6d, 0xf9, 0x63, 0x6e, 0xca, 0x62, 0xfc, 0x12, 0x9e, 0xd5, 0x59, 0xc6, 0x11,
-	0x41, 0x3c, 0xa5, 0x06, 0xad, 0xc2, 0xfc, 0x80, 0x78, 0x9e, 0xde, 0x27, 0xdc, 0xd2, 0x3c, 0x0e,
-	0x9a, 0xea, 0x47, 0x50, 0x8c, 0xca, 0xa1, 0x02, 0xcc, 0xef, 0xb6, 0x1e, 0xb7, 0xda, 0x4f, 0x5b,
-	0xca, 0x1c, 0x5a, 0x82, 0xc2, 0x6e, 0x0b, 0x37, 0x2a, 0xb5, 0xad, 0x4a, 0x75, 0xbb, 0xa1, 0x24,
-	0xd0, 0x02, 0xe4, 0x27, 0xcd, 0xa4, 0xfa, 0x67, 0x09, 0x00, 0xe6, 0x6e, 0x39, 0xa8, 0x0f, 0x21,
-	0xe3, 0xf9, 0xba, 0x2f, 0x56, 0xe5, 0xe2, 0xfd, 0x37, 0x2e, 0x9a, 0x43, 0x69, 0x2f, 0xfb, 0x47,
-	0xb0, 0x10, 0x89, 0x5a, 0x98, 0x9c, 0xb2, 0x90, 0x05, 0x08, 0xdd, 0x34, 0x5d, 0x69, 0x38, 0xff,
-	0x56, 0x3f, 0x82, 0x0c, 0x97, 0x9e, 0x36, 0x37, 0x07, 0xe9, 0x3a, 0xfb, 0x4a, 0xa0, 0x3c, 0x64,
-	0x70, 0xa3, 0x52, 0xff, 0x5c, 0x49, 0x22, 0x05, 0x8a, 0xf5, 0x66, 0xb7, 0xd6, 0x6e, 0xb5, 0x1a,
-	0xb5, 0x5e, 0xa3, 0xae, 0xa4, 0xd4, 0x5b, 0x90, 0x69, 0x0e, 0x98, 0xe6, 0x9b, 0x6c, 0xc9, 0xef,
-	0x13, 0x97, 0x38, 0x46, 0xb0, 0x93, 0x26, 0x04, 0xf5, 0x27, 0x79, 0xc8, 0xec, 0xd0, 0x91, 0xe3,
-	0xa3, 0xfb, 0x91, 0xb0, 0xb5, 0x18, 0x9f, 0x21, 0x70, 0x60, 0xb9, 0x37, 0x1e, 0x12, 0x19, 0xd6,
-	0xae, 0x43, 0x56, 0x6c, 0x0e, 0x39, 0x1c, 0xd9, 0x62, 0x74, 0x5f, 0x77, 0xfb, 0xc4, 0x97, 0xe3,
-	0x91, 0x2d, 0xf4, 0x16, 0x3b, 0xb1, 0x74, 0x93, 0x3a, 0xf6, 0x98, 0xef, 0xa1, 0x9c, 0x38, 0x96,
-	0x30, 0xd1, 0xcd, 0xb6, 0x63, 0x8f, 0x71, 0xc8, 0x45, 0x5b, 0x50, 0xdc, 0xb3, 0x1c, 0x53, 0xa3,
-	0x43, 0x11, 0xe4, 0x33, 0x17, 0xef, 0x38, 0x61, 0x55, 0xd5, 0x72, 0xcc, 0xb6, 0x00, 0xe3, 0xc2,
-	0xde, 0xa4, 0x81, 0x5a, 0xb0, 0x78, 0x44, 0xed, 0xd1, 0x80, 0x84, 0xba, 0xb2, 0x5c, 0xd7, 0x9b,
-	0x17, 0xeb, 0x7a, 0xc2, 0xf1, 0x81, 0xb6, 0x85, 0xa3, 0x68, 0x13, 0x3d, 0x86, 0x05, 0x7f, 0x30,
-	0xdc, 0xf7, 0x42, 0x75, 0xf3, 0x5c, 0xdd, 0xf7, 0x2e, 0x71, 0x18, 0x83, 0x07, 0xda, 0x8a, 0x7e,
-	0xa4, 0x55, 0xfa, 0x8d, 0x14, 0x14, 0x22, 0x96, 0xa3, 0x2e, 0x14, 0x86, 0x2e, 0x1d, 0xea, 0x7d,
-	0x7e, 0x50, 0xc9, 0xb9, 0xb8, 0xf7, 0x52, 0xa3, 0x2e, 0x77, 0x26, 0x82, 0x38, 0xaa, 0x45, 0x3d,
-	0x4d, 0x42, 0x21, 0xc2, 0x44, 0xb7, 0x21, 0x87, 0x3b, 0xb8, 0xf9, 0xa4, 0xd2, 0x6b, 0x28, 0x73,
-	0xa5, 0x9b, 0x27, 0xa7, 0x1b, 0xab, 0x5c, 0x5b, 0x54, 0x41, 0xc7, 0xb5, 0x8e, 0xd8, 0xd2, 0x7b,
-	0x0b, 0xe6, 0x03, 0x68, 0xa2, 0xf4, 0xdd, 0x93, 0xd3, 0x8d, 0x57, 0x66, 0xa1, 0x11, 0x24, 0xee,
-	0x6e, 0x55, 0x70, 0xa3, 0xae, 0x24, 0xe3, 0x91, 0xb8, 0x7b, 0xa0, 0xbb, 0xc4, 0x44, 0xdf, 0x83,
-	0xac, 0x04, 0xa6, 0x4a, 0xa5, 0x93, 0xd3, 0x8d, 0xeb, 0xb3, 0xc0, 0x09, 0x0e, 0x77, 0xb7, 0x2b,
-	0x4f, 0x1a, 0x4a, 0x3a, 0x1e, 0x87, 0xbb, 0xb6, 0x7e, 0x44, 0xd0, 0x1b, 0x90, 0x11, 0xb0, 0x4c,
-	0xe9, 0xc6, 0xc9, 0xe9, 0xc6, 0x77, 0x5e, 0x50, 0xc7, 0x50, 0xa5, 0xd5, 0xdf, 0xfe, 0xc3, 0xb5,
-	0xb9, 0xbf, 0xfa, 0xa3, 0x35, 0x65, 0x96, 0x5d, 0xfa, 0xef, 0x04, 0x2c, 0x4c, 0x4d, 0x39, 0x52,
-	0x21, 0xeb, 0x50, 0x83, 0x0e, 0xc5, 0xf9, 0x95, 0xab, 0xc2, 0xf9, 0xd9, 0x7a, 0xb6, 0x45, 0x6b,
-	0x74, 0x38, 0xc6, 0x92, 0x83, 0x1e, 0xcf, 0x9c, 0xc0, 0x0f, 0x5e, 0x72, 0x3d, 0xc5, 0x9e, 0xc1,
-	0x9f, 0xc2, 0x82, 0xe9, 0x5a, 0x47, 0xc4, 0xd5, 0x0c, 0xea, 0xec, 0x5b, 0x7d, 0x79, 0x36, 0x95,
-	0x62, 0xd3, 0x44, 0x0e, 0xc4, 0x45, 0x21, 0x50, 0xe3, 0xf8, 0x6f, 0x71, 0xfa, 0x96, 0x9e, 0x40,
-	0x31, 0xba, 0x42, 0xd9, 0x71, 0xe2, 0x59, 0xbf, 0x42, 0x64, 0x3e, 0xc8, 0xb3, 0x47, 0x9c, 0x67,
-	0x14, 0x91, 0x0d, 0xbe, 0x09, 0xe9, 0x01, 0x35, 0x85, 0x9e, 0x85, 0xea, 0x35, 0x96, 0x04, 0xfc,
-	0xd3, 0xd9, 0x7a, 0x81, 0x7a, 0xe5, 0x4d, 0xcb, 0x26, 0x3b, 0xd4, 0x24, 0x98, 0x03, 0xd4, 0x23,
-	0x48, 0xb3, 0x50, 0x81, 0xbe, 0x0b, 0xe9, 0x6a, 0xb3, 0x55, 0x57, 0xe6, 0x4a, 0xcb, 0x27, 0xa7,
-	0x1b, 0x0b, 0xdc, 0x25, 0x8c, 0xc1, 0xd6, 0x2e, 0x5a, 0x87, 0xec, 0x93, 0xf6, 0xf6, 0xee, 0x0e,
-	0x5b, 0x5e, 0xd7, 0x4e, 0x4e, 0x37, 0x96, 0x42, 0xb6, 0x70, 0x1a, 0x7a, 0x15, 0x32, 0xbd, 0x9d,
-	0xce, 0x66, 0x57, 0x49, 0x96, 0xd0, 0xc9, 0xe9, 0xc6, 0x62, 0xc8, 0xe7, 0x36, 0x97, 0x96, 0xe5,
-	0xac, 0xe6, 0x43, 0xba, 0xfa, 0xb3, 0x24, 0x2c, 0x60, 0x56, 0xf1, 0xb9, 0x7e, 0x87, 0xda, 0x96,
-	0x31, 0x46, 0x1d, 0xc8, 0x1b, 0xd4, 0x31, 0xad, 0xc8, 0x9e, 0xba, 0x7f, 0xc1, 0xa9, 0x3f, 0x91,
-	0x0a, 0x5a, 0xb5, 0x40, 0x12, 0x4f, 0x94, 0xa0, 0x3b, 0x90, 0x31, 0x89, 0xad, 0x8f, 0x65, 0xfa,
-	0x71, 0xa3, 0x2c, 0x6a, 0xca, 0x72, 0x50, 0x53, 0x96, 0xeb, 0xb2, 0xa6, 0xc4, 0x02, 0xc7, 0xd3,
-	0x6c, 0xfd, 0x99, 0xa6, 0xfb, 0x3e, 0x19, 0x0c, 0x7d, 0x91, 0x7b, 0xa4, 0x71, 0x61, 0xa0, 0x3f,
-	0xab, 0x48, 0x12, 0xba, 0x07, 0xd9, 0x63, 0xcb, 0x31, 0xe9, 0xb1, 0x4c, 0x2f, 0x2e, 0x51, 0x2a,
-	0x81, 0xea, 0x09, 0x3b, 0x75, 0x67, 0xcc, 0x64, 0xfe, 0x6e, 0xb5, 0x5b, 0x8d, 0xc0, 0xdf, 0x92,
-	0xdf, 0x76, 0x5a, 0xd4, 0x61, 0x7b, 0x05, 0xda, 0x2d, 0x6d, 0xb3, 0xd2, 0xdc, 0xde, 0xc5, 0xcc,
-	0xe7, 0x2b, 0x27, 0xa7, 0x1b, 0x4a, 0x08, 0xd9, 0xd4, 0x2d, 0x9b, 0xe5, 0xbb, 0x37, 0x20, 0x55,
-	0x69, 0x7d, 0xae, 0x24, 0x4b, 0xca, 0xc9, 0xe9, 0x46, 0x31, 0x64, 0x57, 0x9c, 0xf1, 0x64, 0x1b,
-	0xcd, 0xf6, 0xab, 0xfe, 0x6d, 0x0a, 0x8a, 0xbb, 0x43, 0x53, 0xf7, 0x89, 0x58, 0x93, 0x68, 0x03,
-	0x0a, 0x43, 0xdd, 0xd5, 0x6d, 0x9b, 0xd8, 0x96, 0x37, 0x90, 0xd5, 0x72, 0x94, 0x84, 0x3e, 0x78,
-	0x59, 0x37, 0x56, 0x73, 0x6c, 0x9d, 0xfd, 0xf8, 0x5f, 0xd6, 0x13, 0x81, 0x43, 0x77, 0x61, 0x71,
-	0x5f, 0x58, 0xab, 0xe9, 0x06, 0x9f, 0xd8, 0x14, 0x9f, 0xd8, 0x72, 0xdc, 0xc4, 0x46, 0xcd, 0x2a,
-	0xcb, 0x41, 0x56, 0xb8, 0x14, 0x5e, 0xd8, 0x8f, 0x36, 0xd1, 0x03, 0x98, 0x1f, 0x50, 0xc7, 0xf2,
-	0xa9, 0x7b, 0xf5, 0x2c, 0x04, 0x48, 0x74, 0x1b, 0x96, 0xd9, 0xe4, 0x06, 0xf6, 0x70, 0x36, 0x3f,
-	0xb1, 0x92, 0x78, 0x69, 0xa0, 0x3f, 0x93, 0x1d, 0x62, 0x46, 0x46, 0x55, 0xc8, 0x50, 0x97, 0xa5,
-	0x44, 0x59, 0x6e, 0xee, 0x3b, 0x57, 0x9a, 0x2b, 0x1a, 0x6d, 0x26, 0x83, 0x85, 0xa8, 0xfa, 0x3e,
-	0x2c, 0x4c, 0x0d, 0x82, 0x65, 0x02, 0x9d, 0xca, 0x6e, 0xb7, 0xa1, 0xcc, 0xa1, 0x22, 0xe4, 0x6a,
-	0xed, 0x56, 0xaf, 0xd9, 0xda, 0x65, 0xa9, 0x4c, 0x11, 0x72, 0xb8, 0xbd, 0xbd, 0x5d, 0xad, 0xd4,
-	0x1e, 0x2b, 0x49, 0xb5, 0x0c, 0x85, 0x88, 0x36, 0xb4, 0x08, 0xd0, 0xed, 0xb5, 0x3b, 0xda, 0x66,
-	0x13, 0x77, 0x7b, 0x22, 0x11, 0xea, 0xf6, 0x2a, 0xb8, 0x27, 0x09, 0x09, 0xf5, 0x3f, 0x92, 0xc1,
-	0x8c, 0xca, 0xdc, 0xa7, 0x3a, 0x9d, 0xfb, 0x5c, 0x62, 0xbc, 0xcc, 0x7e, 0x26, 0x8d, 0x30, 0x07,
-	0xfa, 0x00, 0x80, 0x2f, 0x1c, 0x62, 0x6a, 0xba, 0x2f, 0x27, 0xbe, 0xf4, 0x82, 0x93, 0x7b, 0xc1,
-	0xa5, 0x0d, 0xce, 0x4b, 0x74, 0xc5, 0x47, 0x1f, 0x43, 0xd1, 0xa0, 0x83, 0xa1, 0x4d, 0xa4, 0x70,
-	0xea, 0x4a, 0xe1, 0x42, 0x88, 0xaf, 0xf8, 0xd1, 0xec, 0x2b, 0x3d, 0x9d, 0x1f, 0xfe, 0x66, 0x22,
-	0xf0, 0x4c, 0x4c, 0xc2, 0x55, 0x84, 0xdc, 0x6e, 0xa7, 0x5e, 0xe9, 0x35, 0x5b, 0x8f, 0x94, 0x04,
-	0x02, 0xc8, 0x72, 0x57, 0xd7, 0x95, 0x24, 0x4b, 0x14, 0x6b, 0xed, 0x9d, 0xce, 0x76, 0x83, 0xa7,
-	0x5c, 0x68, 0x05, 0x94, 0xc0, 0xd9, 0x1a, 0x77, 0x64, 0xa3, 0xae, 0xa4, 0xd1, 0x35, 0x58, 0x0a,
-	0xa9, 0x52, 0x32, 0x83, 0xae, 0x03, 0x0a, 0x89, 0x13, 0x15, 0x59, 0xf5, 0xd7, 0x60, 0xa9, 0x46,
-	0x1d, 0x5f, 0xb7, 0x9c, 0x30, 0x89, 0xbe, 0xcf, 0x06, 0x2d, 0x49, 0x9a, 0x25, 0x2f, 0x3b, 0xaa,
-	0x4b, 0xe7, 0x67, 0xeb, 0x85, 0x10, 0xda, 0xac, 0xb3, 0x91, 0x06, 0x0d, 0x93, 0xed, 0xdf, 0xa1,
-	0x65, 0x72, 0xe7, 0x66, 0xaa, 0xf3, 0xe7, 0x67, 0xeb, 0xa9, 0x4e, 0xb3, 0x8e, 0x19, 0x0d, 0x7d,
-	0x17, 0xf2, 0xe4, 0x99, 0xe5, 0x6b, 0x06, 0x8b, 0xe1, 0xcc, 0x81, 0x19, 0x9c, 0x63, 0x84, 0x1a,
-	0x0b, 0xd9, 0x55, 0x80, 0x0e, 0x75, 0x7d, 0xd9, 0xf3, 0xbb, 0x90, 0x19, 0x52, 0x97, 0x97, 0xe7,
-	0x17, 0x5e, 0x1a, 0x31, 0xb8, 0x58, 0xa8, 0x58, 0x80, 0xd5, 0xdf, 0x4b, 0x01, 0xf4, 0x74, 0xef,
-	0x50, 0x2a, 0x79, 0x08, 0xf9, 0xf0, 0x02, 0x4e, 0xd6, 0xf9, 0x97, 0xce, 0x76, 0x08, 0x46, 0x0f,
-	0x82, 0xc5, 0x26, 0xca, 0x83, 0xd8, 0x3a, 0x2d, 0xe8, 0x28, 0x2e, 0xc3, 0x9e, 0xae, 0x01, 0xd8,
-	0x91, 0x48, 0x5c, 0x57, 0xce, 0x3c, 0xfb, 0x44, 0x35, 0x7e, 0x2c, 0x08, 0xa7, 0xc9, 0x04, 0x33,
-	0xf6, 0x66, 0x63, 0x66, 0x46, 0xb6, 0xe6, 0xf0, 0x44, 0x0e, 0x7d, 0x0a, 0x05, 0x36, 0x6e, 0xcd,
-	0xe3, 0x3c, 0x99, 0x5b, 0x5e, 0xe8, 0x2a, 0xa1, 0x01, 0xc3, 0x70, 0xe2, 0xe5, 0x57, 0x01, 0xf4,
-	0xe1, 0xd0, 0xb6, 0x88, 0xa9, 0xed, 0x8d, 0x79, 0x32, 0x99, 0xc7, 0x79, 0x49, 0xa9, 0x8e, 0xd9,
-	0x76, 0x09, 0xd8, 0xba, 0xbf, 0x9a, 0xbb, 0xda, 0x81, 0x12, 0x5d, 0xf1, 0xab, 0x0a, 0x2c, 0xba,
-	0x23, 0x87, 0x39, 0x54, 0x5a, 0xa7, 0xfe, 0x69, 0x12, 0x5e, 0x69, 0x11, 0xff, 0x98, 0xba, 0x87,
-	0x15, 0xdf, 0xd7, 0x8d, 0x83, 0x01, 0x71, 0xe4, 0xf4, 0x45, 0x72, 0xf6, 0xc4, 0x54, 0xce, 0xbe,
-	0x0a, 0xf3, 0xba, 0x6d, 0xe9, 0x1e, 0x11, 0x89, 0x4e, 0x1e, 0x07, 0x4d, 0x56, 0x59, 0xb0, 0x3a,
-	0x85, 0x78, 0x1e, 0x11, 0x57, 0x07, 0xcc, 0xf0, 0x80, 0x80, 0x7e, 0x04, 0xd7, 0x65, 0x4a, 0xa3,
-	0x87, 0x5d, 0xb1, 0x9c, 0x39, 0xb8, 0x83, 0x6c, 0xc4, 0x16, 0x4e, 0xf1, 0xc6, 0xc9, 0x9c, 0x67,
-	0x42, 0x6e, 0x0f, 0x7d, 0x99, 0x41, 0xad, 0x98, 0x31, 0xac, 0xd2, 0x23, 0xb8, 0x71, 0xa1, 0xc8,
-	0x37, 0xba, 0x9a, 0xf8, 0x87, 0x24, 0x40, 0xb3, 0x53, 0xd9, 0x91, 0x4e, 0xaa, 0x43, 0x76, 0x5f,
-	0x1f, 0x58, 0xf6, 0xf8, 0xb2, 0x08, 0x38, 0xc1, 0x97, 0x2b, 0xc2, 0x1d, 0x9b, 0x5c, 0x06, 0x4b,
-	0x59, 0x5e, 0x36, 0x8d, 0xf6, 0x1c, 0xe2, 0x87, 0x65, 0x13, 0x6f, 0x31, 0x33, 0x5c, 0xdd, 0x09,
-	0x97, 0xae, 0x68, 0xb0, 0x09, 0xe8, 0xeb, 0x3e, 0x39, 0xd6, 0xc7, 0x41, 0xd8, 0x92, 0x4d, 0xb4,
-	0xc5, 0x2f, 0x00, 0x89, 0x7b, 0x44, 0xcc, 0xd5, 0x0c, 0x77, 0xea, 0x55, 0xf6, 0x60, 0x09, 0x17,
-	0xbe, 0x0b, 0xa5, 0x4b, 0x1f, 0xf1, 0x94, 0x69, 0xc2, 0xfa, 0x46, 0x3e, 0xba, 0x0b, 0x0b, 0x53,
-	0xe3, 0x7c, 0xa1, 0x5e, 0x6d, 0x76, 0x9e, 0xbc, 0xab, 0xa4, 0xe5, 0xd7, 0xfb, 0x4a, 0x56, 0xfd,
-	0xe3, 0x94, 0x08, 0x34, 0xd2, 0xab, 0xf1, 0x17, 0xdf, 0x39, 0xbe, 0xba, 0x0d, 0x6a, 0xcb, 0x00,
-	0xf0, 0xe6, 0xe5, 0xf1, 0x87, 0xd5, 0x3f, 0x1c, 0x8e, 0x43, 0x41, 0xb4, 0x0e, 0x05, 0xb1, 0x8a,
-	0x35, 0xb6, 0xe1, 0xb8, 0x5b, 0x17, 0x30, 0x08, 0x12, 0x93, 0x44, 0xb7, 0x60, 0x91, 0xdf, 0x6f,
-	0x78, 0x07, 0xc4, 0x14, 0x98, 0x34, 0xc7, 0x2c, 0x84, 0x54, 0x0e, 0xdb, 0x81, 0xa2, 0x24, 0x68,
-	0x3c, 0xf7, 0xcd, 0x70, 0x83, 0x6e, 0x5f, 0x65, 0x90, 0x10, 0xe1, 0x29, 0x71, 0x61, 0x38, 0x69,
-	0xa8, 0x75, 0xc8, 0x05, 0xc6, 0xa2, 0x55, 0x48, 0xf5, 0x6a, 0x1d, 0x65, 0xae, 0xb4, 0x74, 0x72,
-	0xba, 0x51, 0x08, 0xc8, 0xbd, 0x5a, 0x87, 0x71, 0x76, 0xeb, 0x1d, 0x25, 0x31, 0xcd, 0xd9, 0xad,
-	0x77, 0x4a, 0x69, 0x96, 0x83, 0xa9, 0xfb, 0x50, 0x88, 0xf4, 0x80, 0x5e, 0x87, 0xf9, 0x66, 0xeb,
-	0x11, 0x6e, 0x74, 0xbb, 0xca, 0x5c, 0xe9, 0xfa, 0xc9, 0xe9, 0x06, 0x8a, 0x70, 0x9b, 0x4e, 0x9f,
-	0xcd, 0x0f, 0x7a, 0x15, 0xd2, 0x5b, 0x6d, 0x76, 0xb6, 0x8b, 0x64, 0x3b, 0x82, 0xd8, 0xa2, 0x9e,
-	0x5f, 0xba, 0x26, 0x93, 0xbb, 0xa8, 0x62, 0xf5, 0xf7, 0x13, 0x90, 0x15, 0x9b, 0x29, 0x76, 0xa2,
-	0x2a, 0x30, 0x1f, 0x54, 0xc2, 0xa2, 0x10, 0x7a, 0xf3, 0xe2, 0xa2, 0xa5, 0x2c, 0x6b, 0x0c, 0xb1,
-	0xfc, 0x02, 0xb9, 0xd2, 0x87, 0x50, 0x8c, 0x32, 0xbe, 0xd1, 0xe2, 0xfb, 0x11, 0x14, 0xd8, 0xfa,
-	0x0e, 0x8a, 0x97, 0xfb, 0x90, 0x15, 0x01, 0x21, 0x3c, 0x6b, 0x2e, 0xae, 0xa0, 0x24, 0x12, 0x3d,
-	0x84, 0x79, 0x51, 0x75, 0x05, 0x17, 0xa0, 0x6b, 0x97, 0xef, 0x22, 0x1c, 0xc0, 0xd5, 0x4f, 0x21,
-	0xdd, 0x21, 0xc4, 0x65, 0xbe, 0x77, 0xa8, 0x49, 0x26, 0xc7, 0xb3, 0x2c, 0x18, 0x4d, 0xd2, 0xac,
-	0xb3, 0x82, 0xd1, 0x24, 0x4d, 0x33, 0xbc, 0xe2, 0x49, 0x46, 0xae, 0x78, 0x7a, 0x50, 0x7c, 0x4a,
-	0xac, 0xfe, 0x81, 0x4f, 0x4c, 0xae, 0xe8, 0x1d, 0x48, 0x0f, 0x49, 0x68, 0xfc, 0x6a, 0xec, 0x02,
-	0x23, 0xc4, 0xc5, 0x1c, 0xc5, 0xe2, 0xc8, 0x31, 0x97, 0x96, 0xb7, 0xf6, 0xb2, 0xa5, 0xfe, 0x7d,
-	0x12, 0x16, 0x9b, 0x9e, 0x37, 0xd2, 0x1d, 0x23, 0xc8, 0xdc, 0x3e, 0x99, 0xce, 0xdc, 0x62, 0x9f,
-	0x37, 0xa6, 0x45, 0xa6, 0x6f, 0xae, 0xe4, 0xe9, 0x99, 0x0c, 0x4f, 0x4f, 0xf5, 0xdf, 0x13, 0xc1,
-	0xf5, 0xd4, 0xad, 0xc8, 0x76, 0x2f, 0xad, 0x9e, 0x9c, 0x6e, 0xac, 0x44, 0x35, 0x91, 0x5d, 0xe7,
-	0xd0, 0xa1, 0xc7, 0x0e, 0x7a, 0x0d, 0x32, 0xb8, 0xd1, 0x6a, 0x3c, 0x55, 0x12, 0x62, 0x79, 0x4e,
-	0x81, 0x30, 0x71, 0xc8, 0x31, 0xd3, 0xd4, 0x69, 0xb4, 0xea, 0x2c, 0xd3, 0x4a, 0xc6, 0x68, 0xea,
-	0x10, 0xc7, 0xb4, 0x9c, 0x3e, 0x7a, 0x1d, 0xb2, 0xcd, 0x6e, 0x77, 0x97, 0x5f, 0x20, 0xbc, 0x72,
-	0x72, 0xba, 0x71, 0x6d, 0x0a, 0xc5, 0xaf, 0x26, 0x4d, 0x06, 0x62, 0x65, 0x0e, 0xcb, 0xc1, 0x62,
-	0x40, 0x2c, 0x7f, 0x16, 0x20, 0xdc, 0xee, 0x55, 0x7a, 0x0d, 0x25, 0x13, 0x03, 0xc2, 0x94, 0xfd,
-	0x95, 0xdb, 0xed, 0x9f, 0x93, 0xa0, 0x54, 0x0c, 0x83, 0x0c, 0x7d, 0xc6, 0x97, 0x95, 0x65, 0x0f,
-	0x72, 0x43, 0xf6, 0x65, 0x91, 0x20, 0x4b, 0x7a, 0x18, 0xfb, 0x40, 0x37, 0x23, 0x57, 0xc6, 0xd4,
-	0x26, 0x15, 0x73, 0x60, 0x79, 0x9e, 0x45, 0x1d, 0x41, 0xc3, 0xa1, 0xa6, 0xd2, 0x7f, 0x26, 0xe0,
-	0x5a, 0x0c, 0x02, 0xdd, 0x85, 0xb4, 0x4b, 0xed, 0x60, 0x0e, 0x6f, 0x5e, 0x74, 0xf3, 0xc8, 0x44,
-	0x31, 0x47, 0xa2, 0x35, 0x00, 0x7d, 0xe4, 0x53, 0x9d, 0xf7, 0xcf, 0x67, 0x2f, 0x87, 0x23, 0x14,
-	0xf4, 0x14, 0xb2, 0x1e, 0x31, 0x5c, 0x12, 0xe4, 0xd2, 0x9f, 0xfe, 0x5f, 0xad, 0x2f, 0x77, 0xb9,
-	0x1a, 0x2c, 0xd5, 0x95, 0xca, 0x90, 0x15, 0x14, 0xb6, 0xec, 0x4d, 0xdd, 0xd7, 0xe5, 0xbd, 0x34,
-	0xff, 0x66, 0xab, 0x49, 0xb7, 0xfb, 0xc1, 0x6a, 0xd2, 0xed, 0xbe, 0xfa, 0x37, 0x49, 0x80, 0xc6,
-	0x33, 0x9f, 0xb8, 0x8e, 0x6e, 0xd7, 0x2a, 0xa8, 0x11, 0x89, 0xfe, 0x62, 0xb4, 0x6f, 0xc7, 0x5e,
-	0xb6, 0x87, 0x12, 0xe5, 0x5a, 0x25, 0x26, 0xfe, 0xdf, 0x80, 0xd4, 0xc8, 0x95, 0x6f, 0xae, 0x22,
-	0x0f, 0xde, 0xc5, 0xdb, 0x98, 0xd1, 0x50, 0x63, 0x12, 0xb6, 0x52, 0x17, 0xbf, 0xac, 0x46, 0x3a,
-	0x88, 0x0d, 0x5d, 0x6c, 0xe7, 0x1b, 0xba, 0x66, 0x10, 0x79, 0x72, 0x14, 0xc5, 0xce, 0xaf, 0x55,
-	0x6a, 0xc4, 0xf5, 0x71, 0xd6, 0xd0, 0xd9, 0xff, 0x6f, 0x15, 0xdf, 0xde, 0x01, 0x98, 0x0c, 0x0d,
-	0xad, 0x41, 0xa6, 0xb6, 0xd9, 0xed, 0x6e, 0x2b, 0x73, 0x22, 0x80, 0x4f, 0x58, 0x9c, 0xac, 0xfe,
-	0x65, 0x12, 0x72, 0xb5, 0x8a, 0x3c, 0x56, 0x6b, 0xa0, 0xf0, 0xa8, 0xc4, 0x6f, 0xf3, 0xc9, 0xb3,
-	0xa1, 0xe5, 0x8e, 0x65, 0x60, 0xb9, 0xa4, 0xa8, 0x5d, 0x64, 0x22, 0xcc, 0xea, 0x06, 0x17, 0x40,
-	0x18, 0x8a, 0x44, 0x3a, 0x41, 0x33, 0xf4, 0x20, 0xc6, 0xaf, 0x5d, 0xee, 0x2c, 0x51, 0x9e, 0x4c,
-	0xda, 0x1e, 0x2e, 0x04, 0x4a, 0x6a, 0xba, 0x87, 0x3e, 0x80, 0x25, 0xcf, 0xea, 0x3b, 0x96, 0xd3,
-	0xd7, 0x02, 0xe7, 0xf1, 0xa7, 0x85, 0xea, 0xf2, 0xf9, 0xd9, 0xfa, 0x42, 0x57, 0xb0, 0xa4, 0x0f,
-	0x17, 0x24, 0xb2, 0xc6, 0x5d, 0x89, 0xde, 0x87, 0xc5, 0x88, 0x28, 0xf3, 0xa2, 0x70, 0xbb, 0x72,
-	0x7e, 0xb6, 0x5e, 0x0c, 0x25, 0x1f, 0x93, 0x31, 0x2e, 0x86, 0x82, 0x8f, 0x09, 0xbf, 0x7f, 0xd9,
-	0xa7, 0xae, 0x41, 0x34, 0x97, 0xef, 0x69, 0x7e, 0x82, 0xa7, 0x71, 0x81, 0xd3, 0xc4, 0x36, 0x57,
-	0x9f, 0xc0, 0xb5, 0xb6, 0x6b, 0x1c, 0x10, 0xcf, 0x17, 0xae, 0x90, 0x5e, 0xfc, 0x14, 0x6e, 0xfa,
-	0xba, 0x77, 0xa8, 0x1d, 0x58, 0x9e, 0x4f, 0xdd, 0xb1, 0xe6, 0x12, 0x9f, 0x38, 0x8c, 0xaf, 0xf1,
-	0xf7, 0x48, 0x79, 0x41, 0x76, 0x83, 0x61, 0xb6, 0x04, 0x04, 0x07, 0x88, 0x6d, 0x06, 0x50, 0x9b,
-	0x50, 0x64, 0x65, 0x4a, 0x9d, 0xec, 0xeb, 0x23, 0xdb, 0x67, 0xa3, 0x07, 0x9b, 0xf6, 0xb5, 0x97,
-	0x3e, 0xa6, 0xf2, 0x36, 0xed, 0x8b, 0x4f, 0xf5, 0x87, 0xa0, 0xd4, 0x2d, 0x6f, 0xa8, 0xfb, 0xc6,
-	0x41, 0x70, 0xf3, 0x87, 0xea, 0xa0, 0x1c, 0x10, 0xdd, 0xf5, 0xf7, 0x88, 0xee, 0x6b, 0x43, 0xe2,
-	0x5a, 0xd4, 0xbc, 0x7a, 0x96, 0x97, 0x42, 0x91, 0x0e, 0x97, 0x50, 0xff, 0x2b, 0x01, 0x80, 0xf5,
-	0xfd, 0x20, 0x23, 0xfb, 0x3e, 0x2c, 0x7b, 0x8e, 0x3e, 0xf4, 0x0e, 0xa8, 0xaf, 0x59, 0x8e, 0x4f,
-	0xdc, 0x23, 0xdd, 0x96, 0x17, 0x38, 0x4a, 0xc0, 0x68, 0x4a, 0x3a, 0x7a, 0x07, 0xd0, 0x21, 0x21,
-	0x43, 0x8d, 0xda, 0xa6, 0x16, 0x30, 0xc5, 0x6b, 0x69, 0x1a, 0x2b, 0x8c, 0xd3, 0xb6, 0xcd, 0x6e,
-	0x40, 0x47, 0x55, 0x58, 0x63, 0xc3, 0x27, 0x8e, 0xef, 0x5a, 0xc4, 0xd3, 0xf6, 0xa9, 0xab, 0x79,
-	0x36, 0x3d, 0xd6, 0xf6, 0xa9, 0x6d, 0xd3, 0x63, 0xe2, 0x06, 0x77, 0x63, 0x25, 0x9b, 0xf6, 0x1b,
-	0x02, 0xb4, 0x49, 0xdd, 0xae, 0x4d, 0x8f, 0x37, 0x03, 0x04, 0x4b, 0xdb, 0x26, 0x63, 0xf6, 0x2d,
-	0xe3, 0x30, 0x48, 0xdb, 0x42, 0x6a, 0xcf, 0x32, 0x0e, 0xd1, 0xeb, 0xb0, 0x40, 0x6c, 0xc2, 0xaf,
-	0x48, 0x04, 0x2a, 0xc3, 0x51, 0xc5, 0x80, 0xc8, 0x40, 0xea, 0x67, 0xa0, 0x34, 0x1c, 0xc3, 0x1d,
-	0x0f, 0x23, 0x73, 0xfe, 0x0e, 0x20, 0x16, 0x24, 0x35, 0x9b, 0x1a, 0x87, 0xda, 0x40, 0x77, 0xf4,
-	0x3e, 0xb3, 0x4b, 0x3c, 0x62, 0x29, 0x8c, 0xb3, 0x4d, 0x8d, 0xc3, 0x1d, 0x49, 0x57, 0x3f, 0x00,
-	0xe8, 0x0e, 0x5d, 0xa2, 0x9b, 0x6d, 0x96, 0x4d, 0x30, 0xd7, 0xf1, 0x96, 0x66, 0xca, 0x47, 0x40,
-	0xea, 0xca, 0xad, 0xae, 0x08, 0x46, 0x3d, 0xa4, 0xab, 0xbf, 0x08, 0xd7, 0x3a, 0xb6, 0x6e, 0xf0,
-	0x07, 0xf1, 0x4e, 0xf8, 0x2a, 0x83, 0x1e, 0x42, 0x56, 0x40, 0xe5, 0x4c, 0xc6, 0x6e, 0xb7, 0x49,
-	0x9f, 0x5b, 0x73, 0x58, 0xe2, 0xab, 0x45, 0x80, 0x89, 0x1e, 0xf5, 0xcf, 0x13, 0x90, 0x0f, 0xf5,
-	0xa3, 0x0d, 0x28, 0x18, 0xd4, 0x61, 0xcb, 0xdb, 0x72, 0x64, 0x55, 0x9f, 0xc7, 0x51, 0x12, 0x6a,
-	0x42, 0x61, 0x18, 0x4a, 0x5f, 0x9a, 0xcf, 0xc5, 0x58, 0x8d, 0xa3, 0xb2, 0xe8, 0x43, 0xc8, 0x07,
-	0xaf, 0xae, 0x41, 0x84, 0xbd, 0xfc, 0x91, 0x76, 0x02, 0x57, 0x3f, 0x01, 0xf8, 0x01, 0xb5, 0x9c,
-	0x1e, 0x3d, 0x24, 0x0e, 0x7f, 0x45, 0x64, 0x35, 0x21, 0x09, 0xbc, 0x28, 0x5b, 0xbc, 0xd4, 0x17,
-	0x53, 0x10, 0x3e, 0xa6, 0x89, 0xa6, 0xfa, 0xd7, 0x49, 0xc8, 0x62, 0x4a, 0xfd, 0x5a, 0x05, 0x6d,
-	0x40, 0x56, 0xc6, 0x09, 0x7e, 0xfe, 0x54, 0xf3, 0xe7, 0x67, 0xeb, 0x19, 0x11, 0x20, 0x32, 0x06,
-	0x8f, 0x0c, 0x91, 0x08, 0x9e, 0xbc, 0x28, 0x82, 0xa3, 0xbb, 0x50, 0x94, 0x20, 0xed, 0x40, 0xf7,
-	0x0e, 0x44, 0x81, 0x56, 0x5d, 0x3c, 0x3f, 0x5b, 0x07, 0x81, 0xdc, 0xd2, 0xbd, 0x03, 0x0c, 0x02,
-	0xcd, 0xbe, 0x51, 0x03, 0x0a, 0x5f, 0x50, 0xcb, 0xd1, 0x7c, 0x3e, 0x08, 0x79, 0x99, 0x18, 0x3b,
-	0x8f, 0x93, 0xa1, 0xca, 0x27, 0x75, 0xf8, 0x62, 0x32, 0xf8, 0x06, 0x2c, 0xb8, 0x94, 0xfa, 0x22,
-	0x6c, 0x59, 0xd4, 0x91, 0xf7, 0x14, 0x1b, 0xb1, 0xd7, 0xd7, 0x94, 0xfa, 0x58, 0xe2, 0x70, 0xd1,
-	0x8d, 0xb4, 0xd0, 0x5d, 0x58, 0xb1, 0x75, 0xcf, 0xd7, 0x78, 0xbc, 0x33, 0x27, 0xda, 0xb2, 0x7c,
-	0xab, 0x21, 0xc6, 0xdb, 0xe4, 0xac, 0x40, 0x42, 0xfd, 0xc7, 0x04, 0x14, 0xd8, 0x60, 0xac, 0x7d,
-	0xcb, 0x60, 0x49, 0xde, 0x37, 0xcf, 0x3d, 0x6e, 0x40, 0xca, 0xf0, 0x5c, 0xe9, 0x54, 0x7e, 0xf8,
-	0xd6, 0xba, 0x18, 0x33, 0x1a, 0xfa, 0x0c, 0xb2, 0xf2, 0xbe, 0x44, 0xa4, 0x1d, 0xea, 0xd5, 0xe9,
-	0xa8, 0xf4, 0x8d, 0x94, 0xe3, 0x6b, 0x79, 0x62, 0x9d, 0x38, 0x04, 0x70, 0x94, 0x84, 0xae, 0x43,
-	0xd2, 0x10, 0xee, 0x92, 0xbf, 0xd9, 0xa8, 0xb5, 0x70, 0xd2, 0x70, 0xd4, 0xbf, 0x4b, 0xc0, 0xc2,
-	0x64, 0xc3, 0xb3, 0x15, 0x70, 0x13, 0xf2, 0xde, 0x68, 0xcf, 0x1b, 0x7b, 0x3e, 0x19, 0x04, 0x2f,
-	0xa4, 0x21, 0x01, 0x35, 0x21, 0xaf, 0xdb, 0x7d, 0xea, 0x5a, 0xfe, 0xc1, 0x40, 0x56, 0xa2, 0xf1,
-	0xa9, 0x42, 0x54, 0x67, 0xb9, 0x12, 0x88, 0xe0, 0x89, 0x74, 0x70, 0xee, 0x8b, 0x67, 0x74, 0x7e,
-	0xee, 0xbf, 0x06, 0x45, 0x5b, 0x1f, 0xf0, 0x0b, 0x24, 0xdf, 0x1a, 0x88, 0x71, 0xa4, 0x71, 0x41,
-	0xd2, 0x7a, 0xd6, 0x80, 0xa8, 0x2a, 0xe4, 0x43, 0x65, 0x68, 0x09, 0x0a, 0x95, 0x46, 0x57, 0xbb,
-	0x77, 0xff, 0xa1, 0xf6, 0xa8, 0xb6, 0xa3, 0xcc, 0xc9, 0xdc, 0xf4, 0x2f, 0x12, 0xb0, 0x20, 0xc3,
-	0x91, 0xcc, 0xf7, 0x5f, 0x87, 0x79, 0x57, 0xdf, 0xf7, 0x83, 0x8a, 0x24, 0x2d, 0x56, 0x35, 0x8b,
-	0xf0, 0xac, 0x22, 0x61, 0xac, 0xf8, 0x8a, 0x24, 0xf2, 0x66, 0x9f, 0xba, 0xf4, 0xcd, 0x3e, 0xfd,
-	0x73, 0x79, 0xb3, 0x57, 0x7f, 0x1d, 0x60, 0xd3, 0xb2, 0x49, 0x4f, 0xdc, 0x35, 0xc5, 0xd5, 0x97,
-	0x2c, 0x87, 0x93, 0x77, 0x99, 0x41, 0x0e, 0xd7, 0xac, 0x63, 0x46, 0x63, 0xac, 0xbe, 0x65, 0xca,
-	0xcd, 0xc8, 0x59, 0x8f, 0x18, 0xab, 0x6f, 0x99, 0xe1, 0x2b, 0x55, 0xfa, 0xaa, 0x57, 0xaa, 0xd3,
-	0x04, 0x2c, 0xc9, 0xdc, 0x35, 0x0c, 0xbf, 0x6f, 0x43, 0x5e, 0xa4, 0xb1, 0x93, 0x82, 0x8e, 0xbf,
-	0x53, 0x0b, 0x5c, 0xb3, 0x8e, 0x73, 0x82, 0xdd, 0x34, 0xd1, 0x3a, 0x14, 0x24, 0x34, 0xf2, 0xfb,
-	0x1e, 0x10, 0xa4, 0x16, 0x33, 0xff, 0x5d, 0x48, 0xef, 0x5b, 0x36, 0x91, 0x0b, 0x3d, 0x36, 0x00,
-	0x4c, 0x1c, 0xb0, 0x35, 0x87, 0x39, 0xba, 0x9a, 0x0b, 0x2e, 0xe3, 0xb8, 0x7d, 0xb2, 0xec, 0x8c,
-	0xda, 0x27, 0x2a, 0xd0, 0x19, 0xfb, 0x04, 0x8e, 0xd9, 0x27, 0xd8, 0xc2, 0x3e, 0x09, 0x8d, 0xda,
-	0x27, 0x48, 0x3f, 0x17, 0xfb, 0xb6, 0xe1, 0x7a, 0xd5, 0xd6, 0x8d, 0x43, 0xdb, 0xf2, 0x7c, 0x62,
-	0x46, 0x23, 0xc6, 0x7d, 0xc8, 0x4e, 0x25, 0x9d, 0x97, 0xdd, 0x5a, 0x4a, 0xa4, 0xfa, 0x6f, 0x09,
-	0x28, 0x6e, 0x11, 0xdd, 0xf6, 0x0f, 0x26, 0x57, 0x43, 0x3e, 0xf1, 0x7c, 0x79, 0x58, 0xf1, 0x6f,
-	0xf4, 0x1e, 0xe4, 0xc2, 0x9c, 0xe4, 0xca, 0xf7, 0xb7, 0x10, 0x8a, 0x1e, 0xc0, 0x3c, 0xdb, 0x63,
-	0x74, 0x14, 0x14, 0x3b, 0x97, 0x3d, 0xed, 0x48, 0x24, 0x3b, 0x64, 0x5c, 0xc2, 0x93, 0x10, 0xbe,
-	0x94, 0x32, 0x38, 0x68, 0xa2, 0xff, 0x0f, 0x45, 0xfe, 0x32, 0x11, 0xe4, 0x5c, 0x99, 0xab, 0x74,
-	0x16, 0xc4, 0xe3, 0xa2, 0xc8, 0xb7, 0xfe, 0x27, 0x01, 0x2b, 0x3b, 0xfa, 0x78, 0x8f, 0xc8, 0xb0,
-	0x41, 0x4c, 0x4c, 0x0c, 0xea, 0x9a, 0xa8, 0x13, 0x0d, 0x37, 0x97, 0xbc, 0x55, 0xc6, 0x09, 0xc7,
-	0x47, 0x9d, 0xa0, 0x00, 0x4b, 0x46, 0x0a, 0xb0, 0x15, 0xc8, 0x38, 0xd4, 0x31, 0x88, 0x8c, 0x45,
-	0xa2, 0xa1, 0x5a, 0xd1, 0x50, 0x53, 0x0a, 0x9f, 0x11, 0xf9, 0x23, 0x60, 0x8b, 0xfa, 0x61, 0x6f,
-	0xe8, 0x33, 0x28, 0x75, 0x1b, 0x35, 0xdc, 0xe8, 0x55, 0xdb, 0x3f, 0xd4, 0xba, 0x95, 0xed, 0x6e,
-	0xe5, 0xfe, 0x5d, 0xad, 0xd3, 0xde, 0xfe, 0xfc, 0xde, 0x83, 0xbb, 0xef, 0x29, 0x89, 0xd2, 0xc6,
-	0xc9, 0xe9, 0xc6, 0xcd, 0x56, 0xa5, 0xb6, 0x2d, 0x76, 0xcc, 0x1e, 0x7d, 0xd6, 0xd5, 0x6d, 0x4f,
-	0xbf, 0x7f, 0xb7, 0x43, 0xed, 0x31, 0xc3, 0xb0, 0x65, 0x5d, 0x8c, 0x9e, 0x57, 0xd1, 0x63, 0x38,
-	0x71, 0xe1, 0x31, 0x3c, 0x39, 0xcd, 0x93, 0x17, 0x9c, 0xe6, 0x9b, 0xb0, 0x62, 0xb8, 0xd4, 0xf3,
-	0x34, 0x96, 0xfd, 0x13, 0x73, 0xa6, 0xbe, 0xf8, 0xce, 0xf9, 0xd9, 0xfa, 0x72, 0x8d, 0xf1, 0xbb,
-	0x9c, 0x2d, 0xd5, 0x2f, 0x1b, 0x11, 0x12, 0xef, 0x49, 0xfd, 0x83, 0x14, 0x4b, 0xa4, 0xac, 0x23,
-	0xcb, 0x26, 0x7d, 0xe2, 0xa1, 0x27, 0xb0, 0x64, 0xb8, 0xc4, 0x64, 0x69, 0xbd, 0x6e, 0x47, 0x7f,
-	0x27, 0xfa, 0xff, 0x62, 0x73, 0x9a, 0x50, 0xb0, 0x5c, 0x0b, 0xa5, 0xba, 0x43, 0x62, 0xe0, 0x45,
-	0x63, 0xaa, 0x8d, 0xbe, 0x80, 0x25, 0x8f, 0xd8, 0x96, 0x33, 0x7a, 0xa6, 0x19, 0xd4, 0xf1, 0xc9,
-	0xb3, 0xe0, 0x45, 0xec, 0x2a, 0xbd, 0xdd, 0xc6, 0x36, 0x93, 0xaa, 0x09, 0xa1, 0x2a, 0x3a, 0x3f,
-	0x5b, 0x5f, 0x9c, 0xa6, 0xe1, 0x45, 0xa9, 0x59, 0xb6, 0x4b, 0x2d, 0x58, 0x9c, 0xb6, 0x06, 0xad,
-	0xc8, 0xbd, 0xcf, 0x43, 0x48, 0xb0, 0xb7, 0xd1, 0x4d, 0xc8, 0xb9, 0xa4, 0x6f, 0x79, 0xbe, 0x2b,
-	0xdc, 0xcc, 0x38, 0x21, 0x85, 0xed, 0x7c, 0xf1, 0x23, 0x9f, 0xd2, 0xaf, 0xc2, 0x4c, 0x8f, 0x6c,
-	0xb3, 0x98, 0x96, 0xa7, 0xef, 0x49, 0x95, 0x39, 0x1c, 0x34, 0xd9, 0x1a, 0x1c, 0x79, 0x61, 0xa2,
-	0xc6, 0xbf, 0x19, 0x8d, 0x67, 0x14, 0xf2, 0x27, 0x4f, 0x3c, 0x67, 0x08, 0x7e, 0x3b, 0x99, 0x8e,
-	0xfc, 0x76, 0x72, 0x05, 0x32, 0x36, 0x39, 0x22, 0xb6, 0x38, 0xcb, 0xb1, 0x68, 0xdc, 0xbe, 0x0b,
-	0xc5, 0xe0, 0x47, 0x7a, 0xfc, 0x57, 0x06, 0x39, 0x48, 0xf7, 0x2a, 0xdd, 0xc7, 0xca, 0x1c, 0x02,
-	0xc8, 0x8a, 0xc5, 0x29, 0x5e, 0xeb, 0x6a, 0xed, 0xd6, 0x66, 0xf3, 0x91, 0x92, 0xbc, 0xfd, 0xb3,
-	0x14, 0xe4, 0xc3, 0xf7, 0x22, 0x76, 0x76, 0xb4, 0x1a, 0x4f, 0x83, 0xd5, 0x1d, 0xd2, 0x5b, 0xe4,
-	0x18, 0xbd, 0x36, 0xb9, 0x85, 0xfa, 0x4c, 0x3c, 0x90, 0x87, 0xec, 0xe0, 0x06, 0xea, 0x0d, 0xc8,
-	0x55, 0xba, 0xdd, 0xe6, 0xa3, 0x56, 0xa3, 0xae, 0x7c, 0x99, 0x28, 0x7d, 0xe7, 0xe4, 0x74, 0x63,
-	0x39, 0x04, 0x55, 0x3c, 0xb1, 0xf8, 0x38, 0xaa, 0x56, 0x6b, 0x74, 0x7a, 0x8d, 0xba, 0xf2, 0x3c,
-	0x39, 0x8b, 0xe2, 0xb7, 0x2a, 0xfc, 0x67, 0x2e, 0xf9, 0x0e, 0x6e, 0x74, 0x2a, 0x98, 0x75, 0xf8,
-	0x65, 0x52, 0x5c, 0x8e, 0x4d, 0x7a, 0x74, 0xc9, 0x50, 0x77, 0x59, 0x9f, 0x6b, 0xc1, 0xcf, 0xbd,
-	0x9e, 0xa7, 0xc4, 0x4f, 0x21, 0x26, 0x8f, 0x5f, 0x44, 0x37, 0xc7, 0xac, 0x37, 0xfe, 0xea, 0xc8,
-	0xd5, 0xa4, 0x66, 0x7a, 0xeb, 0xb2, 0xd8, 0xc3, 0xb4, 0xa8, 0x30, 0x8f, 0x77, 0x5b, 0x2d, 0x06,
-	0x7a, 0x9e, 0x9e, 0x19, 0x1d, 0x1e, 0x39, 0xac, 0x62, 0x46, 0xb7, 0x20, 0x17, 0x3c, 0x4a, 0x2a,
-	0x5f, 0xa6, 0x67, 0x0c, 0xaa, 0x05, 0x2f, 0xaa, 0xbc, 0xc3, 0xad, 0xdd, 0x1e, 0xff, 0x35, 0xda,
-	0xf3, 0xcc, 0x6c, 0x87, 0x07, 0x23, 0xdf, 0xa4, 0xc7, 0x0e, 0xdb, 0xb3, 0xf2, 0x1e, 0xee, 0xcb,
-	0x8c, 0xb8, 0xb4, 0x08, 0x31, 0xf2, 0x12, 0xee, 0x0d, 0xc8, 0xe1, 0xc6, 0x0f, 0xc4, 0x0f, 0xd7,
-	0x9e, 0x67, 0x67, 0xf4, 0x60, 0xf2, 0x05, 0x31, 0x64, 0x6f, 0x6d, 0xdc, 0xd9, 0xaa, 0x70, 0x97,
-	0xcf, 0xa2, 0xda, 0xee, 0xf0, 0x40, 0x77, 0x88, 0x39, 0xf9, 0x3d, 0x48, 0xc8, 0xba, 0xfd, 0x4b,
-	0x90, 0x0b, 0x32, 0x53, 0xb4, 0x06, 0xd9, 0xa7, 0x6d, 0xfc, 0xb8, 0x81, 0x95, 0x39, 0xe1, 0xc3,
-	0x80, 0xf3, 0x54, 0xd4, 0x14, 0x1b, 0x30, 0xbf, 0x53, 0x69, 0x55, 0x1e, 0x35, 0x70, 0x70, 0x45,
-	0x1e, 0x00, 0x64, 0x7a, 0x55, 0x52, 0x64, 0x07, 0xa1, 0xce, 0xea, 0xea, 0x57, 0x5f, 0xaf, 0xcd,
-	0xfd, 0xf4, 0xeb, 0xb5, 0xb9, 0xe7, 0xe7, 0x6b, 0x89, 0xaf, 0xce, 0xd7, 0x12, 0x3f, 0x39, 0x5f,
-	0x4b, 0xfc, 0xeb, 0xf9, 0x5a, 0x62, 0x2f, 0xcb, 0x0f, 0x81, 0x07, 0xff, 0x1b, 0x00, 0x00, 0xff,
-	0xff, 0x3b, 0x34, 0xbd, 0xc6, 0xd3, 0x30, 0x00, 0x00,
+	// 5020 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0x4d, 0x6c, 0x24, 0x49,
+	0x56, 0x76, 0xfd, 0xba, 0xea, 0x55, 0xd9, 0x4e, 0x47, 0x7b, 0x7b, 0xdc, 0xb5, 0xdd, 0x76, 0x4d,
+	0xce, 0xf4, 0xce, 0x6c, 0x6f, 0x53, 0xfd, 0xb7, 0xbb, 0xea, 0x99, 0x61, 0x77, 0xa6, 0xfe, 0x6c,
+	0xd7, 0xb6, 0x5d, 0x55, 0x8a, 0x2a, 0x77, 0xef, 0x22, 0x41, 0x2a, 0x9d, 0x19, 0x2e, 0xe7, 0x38,
+	0x2b, 0xa3, 0xc8, 0xcc, 0xb2, 0xbb, 0x58, 0x10, 0x2d, 0x0e, 0x80, 0x7c, 0x82, 0xdb, 0x22, 0x64,
+	0x2e, 0x70, 0x42, 0x48, 0x1c, 0x40, 0x42, 0x70, 0x1a, 0x24, 0x0e, 0x7b, 0x83, 0x05, 0x09, 0xad,
+	0x40, 0x32, 0xac, 0x0f, 0xdc, 0x56, 0x70, 0x59, 0x71, 0x01, 0x09, 0xc5, 0x4f, 0x66, 0xa5, 0xab,
+	0xd3, 0x76, 0x0f, 0xb3, 0x17, 0xbb, 0xe2, 0xbd, 0xef, 0xbd, 0x78, 0xf1, 0x22, 0xe2, 0xc5, 0x7b,
+	0x11, 0x09, 0xf7, 0x06, 0x96, 0x7f, 0x30, 0xde, 0xab, 0x18, 0x74, 0xf8, 0xc0, 0xa4, 0xc6, 0x21,
+	0x71, 0x1f, 0x78, 0xc7, 0xba, 0x3b, 0x3c, 0xb4, 0xfc, 0x07, 0xfa, 0xc8, 0x7a, 0xe0, 0x4f, 0x46,
+	0xc4, 0xab, 0x8c, 0x5c, 0xea, 0x53, 0x84, 0x04, 0xa0, 0x12, 0x00, 0x2a, 0x47, 0x8f, 0x4a, 0xeb,
+	0x03, 0x4a, 0x07, 0x36, 0x79, 0xc0, 0x11, 0x7b, 0xe3, 0xfd, 0x07, 0xbe, 0x35, 0x24, 0x9e, 0xaf,
+	0x0f, 0x47, 0x42, 0xa8, 0xb4, 0x36, 0x0b, 0x30, 0xc7, 0xae, 0xee, 0x5b, 0xd4, 0x91, 0xfc, 0x95,
+	0x01, 0x1d, 0x50, 0xfe, 0xf3, 0x01, 0xfb, 0x25, 0xa8, 0xea, 0x3a, 0xcc, 0x3f, 0x27, 0xae, 0x67,
+	0x51, 0x07, 0xad, 0x40, 0xc6, 0x72, 0x4c, 0xf2, 0x72, 0x35, 0x51, 0x4e, 0xbc, 0x9f, 0xc6, 0xa2,
+	0xa1, 0x3e, 0x04, 0x68, 0xb1, 0x1f, 0x4d, 0xc7, 0x77, 0x27, 0x48, 0x81, 0xd4, 0x21, 0x99, 0x70,
+	0x44, 0x1e, 0xb3, 0x9f, 0x8c, 0x72, 0xa4, 0xdb, 0xab, 0x49, 0x41, 0x39, 0xd2, 0x6d, 0xf5, 0x27,
+	0x09, 0x28, 0x54, 0x1d, 0x87, 0xfa, 0xbc, 0x77, 0x0f, 0x21, 0x48, 0x3b, 0xfa, 0x90, 0x48, 0x21,
+	0xfe, 0x1b, 0xd5, 0x21, 0x6b, 0xeb, 0x7b, 0xc4, 0xf6, 0x56, 0x93, 0xe5, 0xd4, 0xfb, 0x85, 0xc7,
+	0x5f, 0xab, 0xbc, 0x3e, 0xe4, 0x4a, 0x44, 0x49, 0x65, 0x9b, 0xa3, 0xb9, 0x11, 0x58, 0x8a, 0xa2,
+	0x6f, 0xc3, 0xbc, 0xe5, 0x98, 0x96, 0x41, 0xbc, 0xd5, 0x34, 0xd7, 0xb2, 0x16, 0xa7, 0x65, 0x6a,
+	0x7d, 0x2d, 0xfd, 0xc3, 0xb3, 0xf5, 0x39, 0x1c, 0x08, 0x95, 0x3e, 0x80, 0x42, 0x44, 0x6d, 0xcc,
+	0xd8, 0x56, 0x20, 0x73, 0xa4, 0xdb, 0x63, 0x22, 0x47, 0x27, 0x1a, 0x1f, 0x26, 0x9f, 0x26, 0xd4,
+	0x4f, 0x60, 0xa5, 0xad, 0x0f, 0x89, 0xb9, 0x49, 0x1c, 0xe2, 0x5a, 0x06, 0x26, 0x1e, 0x1d, 0xbb,
+	0x06, 0x61, 0x63, 0x3d, 0xb4, 0x1c, 0x33, 0x18, 0x2b, 0xfb, 0x1d, 0xaf, 0x45, 0xad, 0xc3, 0x5b,
+	0x0d, 0xcb, 0x33, 0x5c, 0xe2, 0x93, 0xcf, 0xad, 0x24, 0x15, 0x28, 0x39, 0x4b, 0xc0, 0xd2, 0xac,
+	0xf4, 0x2f, 0xc1, 0x0d, 0xe6, 0x62, 0x53, 0x73, 0x25, 0x45, 0xf3, 0x46, 0xc4, 0xe0, 0xca, 0x0a,
+	0x8f, 0xdf, 0x8f, 0xf3, 0x50, 0xdc, 0x48, 0xb6, 0xe6, 0xf0, 0x32, 0x57, 0x13, 0x10, 0x7a, 0x23,
+	0x62, 0x20, 0x03, 0x6e, 0x9a, 0xd2, 0xe8, 0x19, 0xf5, 0x49, 0xae, 0x3e, 0x76, 0x1a, 0x2f, 0x19,
+	0xe6, 0xd6, 0x1c, 0x5e, 0x09, 0x94, 0x45, 0x3b, 0xa9, 0x01, 0xe4, 0x02, 0xdd, 0xea, 0x0f, 0x12,
+	0x90, 0x0f, 0x98, 0x1e, 0xfa, 0x2a, 0xe4, 0x1d, 0xdd, 0xa1, 0x9a, 0x31, 0x1a, 0x7b, 0x7c, 0x40,
+	0xa9, 0x5a, 0xf1, 0xfc, 0x6c, 0x3d, 0xd7, 0xd6, 0x1d, 0x5a, 0xef, 0xee, 0x7a, 0x38, 0xc7, 0xd8,
+	0xf5, 0xd1, 0xd8, 0x43, 0x6f, 0x43, 0x71, 0x48, 0x86, 0xd4, 0x9d, 0x68, 0x7b, 0x13, 0x9f, 0x78,
+	0xd2, 0x6d, 0x05, 0x41, 0xab, 0x31, 0x12, 0xfa, 0x16, 0xcc, 0x0f, 0x84, 0x49, 0xab, 0x29, 0xbe,
+	0x7c, 0xde, 0x89, 0xb3, 0x7e, 0xc6, 0x6a, 0x1c, 0xc8, 0xa8, 0xbf, 0x97, 0x80, 0x95, 0x90, 0x4a,
+	0x7e, 0x75, 0x6c, 0xb9, 0x64, 0x48, 0x1c, 0xdf, 0x43, 0xdf, 0x80, 0xac, 0x6d, 0x0d, 0x2d, 0xdf,
+	0x93, 0x3e, 0xbf, 0x13, 0xa7, 0x36, 0x1c, 0x14, 0x96, 0x60, 0x54, 0x85, 0xa2, 0x4b, 0x3c, 0xe2,
+	0x1e, 0x89, 0x15, 0x2f, 0x3d, 0x7a, 0x8d, 0xf0, 0x05, 0x11, 0x75, 0x03, 0x72, 0x5d, 0x5b, 0xf7,
+	0xf7, 0xa9, 0x3b, 0x44, 0x2a, 0x14, 0x75, 0xd7, 0x38, 0xb0, 0x7c, 0x62, 0xf8, 0x63, 0x37, 0xd8,
+	0x7d, 0x17, 0x68, 0xe8, 0x26, 0x24, 0xa9, 0xe8, 0x28, 0x5f, 0xcb, 0x9e, 0x9f, 0xad, 0x27, 0x3b,
+	0x3d, 0x9c, 0xa4, 0x9e, 0xfa, 0x11, 0x2c, 0x77, 0xed, 0xf1, 0xc0, 0x72, 0x1a, 0xc4, 0x33, 0x5c,
+	0x6b, 0xc4, 0xb4, 0xb3, 0x55, 0xc9, 0x62, 0x54, 0xb0, 0x2a, 0xd9, 0xef, 0x70, 0x6b, 0x27, 0xa7,
+	0x5b, 0x5b, 0xfd, 0x9d, 0x24, 0x2c, 0x37, 0x9d, 0x81, 0xe5, 0x90, 0xa8, 0xf4, 0x5d, 0x58, 0x24,
+	0x9c, 0xa8, 0x1d, 0x89, 0x70, 0x23, 0xf5, 0x2c, 0x08, 0x6a, 0x10, 0x83, 0x5a, 0x33, 0x71, 0xe1,
+	0x51, 0xdc, 0xf0, 0x5f, 0xd3, 0x1e, 0x1b, 0x1d, 0x9a, 0x30, 0x3f, 0xe2, 0x83, 0xf0, 0xe4, 0xf4,
+	0xde, 0x8d, 0xd3, 0xf5, 0xda, 0x38, 0x83, 0x20, 0x21, 0x65, 0xbf, 0x48, 0x90, 0xf8, 0xb3, 0x24,
+	0x2c, 0xb5, 0xa9, 0x79, 0xc1, 0x0f, 0x25, 0xc8, 0x1d, 0x50, 0xcf, 0x8f, 0x04, 0xc4, 0xb0, 0x8d,
+	0x9e, 0x42, 0x6e, 0x24, 0xa7, 0x4f, 0xce, 0xfe, 0xed, 0x78, 0x93, 0x05, 0x06, 0x87, 0x68, 0xf4,
+	0x11, 0xe4, 0x83, 0x2d, 0xc3, 0x46, 0xfb, 0x06, 0x0b, 0x67, 0x8a, 0x47, 0xdf, 0x82, 0xac, 0x98,
+	0x84, 0xd5, 0x34, 0x97, 0xbc, 0xfb, 0x46, 0x3e, 0xc7, 0x52, 0x08, 0x6d, 0x42, 0xce, 0xb7, 0x3d,
+	0xcd, 0x72, 0xf6, 0xe9, 0x6a, 0x86, 0x2b, 0x58, 0x8f, 0x0d, 0x32, 0xd4, 0x24, 0xfd, 0xed, 0x5e,
+	0xcb, 0xd9, 0xa7, 0xb5, 0xc2, 0xf9, 0xd9, 0xfa, 0xbc, 0x6c, 0xe0, 0x79, 0xdf, 0xf6, 0xd8, 0x0f,
+	0xf5, 0xf7, 0x13, 0x50, 0x88, 0xa0, 0xd0, 0x1d, 0x00, 0xdf, 0x1d, 0x7b, 0xbe, 0xe6, 0x52, 0xea,
+	0x73, 0x67, 0x15, 0x71, 0x9e, 0x53, 0x30, 0xa5, 0x3e, 0xaa, 0xc0, 0x0d, 0x83, 0xb8, 0xbe, 0x66,
+	0x79, 0xde, 0x98, 0xb8, 0x9a, 0x37, 0xde, 0xfb, 0x94, 0x18, 0x3e, 0x77, 0x5c, 0x11, 0x2f, 0x33,
+	0x56, 0x8b, 0x73, 0x7a, 0x82, 0x81, 0x9e, 0xc0, 0xcd, 0x28, 0x7e, 0x34, 0xde, 0xb3, 0x2d, 0x43,
+	0x63, 0x93, 0x99, 0xe2, 0x22, 0x37, 0xa6, 0x22, 0x5d, 0xce, 0x7b, 0x46, 0x26, 0xea, 0x8f, 0x13,
+	0xa0, 0x60, 0x7d, 0xdf, 0xdf, 0x21, 0xc3, 0x3d, 0xe2, 0xf6, 0x7c, 0xdd, 0x1f, 0x7b, 0xe8, 0x26,
+	0x64, 0x6d, 0xa2, 0x9b, 0xc4, 0xe5, 0x46, 0xe5, 0xb0, 0x6c, 0xa1, 0x5d, 0xb6, 0x83, 0x75, 0xe3,
+	0x40, 0xdf, 0xb3, 0x6c, 0xcb, 0x9f, 0x70, 0x53, 0x16, 0xe3, 0x97, 0xf0, 0xac, 0xce, 0x0a, 0x8e,
+	0x08, 0xe2, 0x0b, 0x6a, 0xd0, 0x2a, 0xcc, 0x0f, 0x89, 0xe7, 0xe9, 0x03, 0xc2, 0x2d, 0xcd, 0xe3,
+	0xa0, 0xa9, 0x7e, 0x04, 0xc5, 0xa8, 0x1c, 0x2a, 0xc0, 0xfc, 0x6e, 0xfb, 0x59, 0xbb, 0xf3, 0xa2,
+	0xad, 0xcc, 0xa1, 0x25, 0x28, 0xec, 0xb6, 0x71, 0xb3, 0x5a, 0xdf, 0xaa, 0xd6, 0xb6, 0x9b, 0x4a,
+	0x02, 0x2d, 0x40, 0x7e, 0xda, 0x4c, 0xaa, 0x7f, 0x91, 0x00, 0x60, 0xee, 0x96, 0x83, 0xfa, 0x10,
+	0x32, 0x9e, 0xaf, 0xfb, 0x62, 0x55, 0x2e, 0x3e, 0x7e, 0xf7, 0xb2, 0x39, 0x94, 0xf6, 0xb2, 0x7f,
+	0x04, 0x0b, 0x91, 0xa8, 0x85, 0xc9, 0x0b, 0x16, 0xb2, 0x00, 0xa1, 0x9b, 0xa6, 0x2b, 0x0d, 0xe7,
+	0xbf, 0xd5, 0x8f, 0x20, 0xc3, 0xa5, 0x2f, 0x9a, 0x9b, 0x83, 0x74, 0x83, 0xfd, 0x4a, 0xa0, 0x3c,
+	0x64, 0x70, 0xb3, 0xda, 0xf8, 0x9e, 0x92, 0x44, 0x0a, 0x14, 0x1b, 0xad, 0x5e, 0xbd, 0xd3, 0x6e,
+	0x37, 0xeb, 0xfd, 0x66, 0x43, 0x49, 0xa9, 0x77, 0x21, 0xd3, 0x1a, 0x32, 0xcd, 0xb7, 0xd9, 0x92,
+	0xdf, 0x27, 0x2e, 0x71, 0x8c, 0x60, 0x27, 0x4d, 0x09, 0xea, 0x4f, 0x0b, 0x90, 0xd9, 0xa1, 0x63,
+	0xc7, 0x47, 0x8f, 0x23, 0x61, 0x6b, 0x31, 0x3e, 0x43, 0xe0, 0xc0, 0x4a, 0x7f, 0x32, 0x22, 0x32,
+	0xac, 0xdd, 0x84, 0xac, 0xd8, 0x1c, 0x72, 0x38, 0xb2, 0xc5, 0xe8, 0xbe, 0xee, 0x0e, 0x88, 0x2f,
+	0xc7, 0x23, 0x5b, 0xe8, 0x7d, 0x76, 0x62, 0xe9, 0x26, 0x75, 0xec, 0x09, 0xdf, 0x43, 0x39, 0x71,
+	0x2c, 0x61, 0xa2, 0x9b, 0x1d, 0xc7, 0x9e, 0xe0, 0x90, 0x8b, 0xb6, 0xa0, 0xb8, 0x67, 0x39, 0xa6,
+	0x46, 0x47, 0x22, 0xc8, 0x67, 0x2e, 0xdf, 0x71, 0xc2, 0xaa, 0x9a, 0xe5, 0x98, 0x1d, 0x01, 0xc6,
+	0x85, 0xbd, 0x69, 0x03, 0xb5, 0x61, 0xf1, 0x88, 0xda, 0xe3, 0x21, 0x09, 0x75, 0x65, 0xb9, 0xae,
+	0xf7, 0x2e, 0xd7, 0xf5, 0x9c, 0xe3, 0x03, 0x6d, 0x0b, 0x47, 0xd1, 0x26, 0x7a, 0x06, 0x0b, 0xfe,
+	0x70, 0xb4, 0xef, 0x85, 0xea, 0xe6, 0xb9, 0xba, 0xaf, 0x5c, 0xe1, 0x30, 0x06, 0x0f, 0xb4, 0x15,
+	0xfd, 0x48, 0x0b, 0x6d, 0x42, 0xc1, 0xa0, 0x8e, 0x67, 0x79, 0x3e, 0x71, 0x8c, 0xc9, 0x6a, 0x8e,
+	0xfb, 0xfe, 0x8a, 0x51, 0xd6, 0xa7, 0x60, 0x1c, 0x95, 0x2c, 0xfd, 0x56, 0x0a, 0x0a, 0x11, 0x17,
+	0xa0, 0x1e, 0x14, 0x46, 0x2e, 0x1d, 0xe9, 0x03, 0x7e, 0xe2, 0xc9, 0x49, 0x7d, 0xf4, 0x46, 0xee,
+	0xab, 0x74, 0xa7, 0x82, 0x38, 0xaa, 0x45, 0x3d, 0x4d, 0x42, 0x21, 0xc2, 0x44, 0xf7, 0x20, 0x87,
+	0xbb, 0xb8, 0xf5, 0xbc, 0xda, 0x6f, 0x2a, 0x73, 0xa5, 0xdb, 0x27, 0xa7, 0xe5, 0x55, 0xae, 0x2d,
+	0xaa, 0xa0, 0xeb, 0x5a, 0x47, 0x6c, 0x0d, 0xbf, 0x0f, 0xf3, 0x01, 0x34, 0x51, 0xfa, 0xf2, 0xc9,
+	0x69, 0xf9, 0xad, 0x59, 0x68, 0x04, 0x89, 0x7b, 0x5b, 0x55, 0xdc, 0x6c, 0x28, 0xc9, 0x78, 0x24,
+	0xee, 0x1d, 0xe8, 0x2e, 0x31, 0xd1, 0x57, 0x20, 0x2b, 0x81, 0xa9, 0x52, 0xe9, 0xe4, 0xb4, 0x7c,
+	0x73, 0x16, 0x38, 0xc5, 0xe1, 0xde, 0x76, 0xf5, 0x79, 0x53, 0x49, 0xc7, 0xe3, 0x70, 0xcf, 0xd6,
+	0x8f, 0x08, 0x7a, 0x17, 0x32, 0x02, 0x96, 0x29, 0xdd, 0x3a, 0x39, 0x2d, 0x7f, 0xe9, 0x35, 0x75,
+	0x0c, 0x55, 0x5a, 0xfd, 0xdd, 0x3f, 0x5e, 0x9b, 0xfb, 0x9b, 0x3f, 0x59, 0x53, 0x66, 0xd9, 0xa5,
+	0xff, 0x49, 0xc0, 0xc2, 0x85, 0xb5, 0x83, 0x54, 0xc8, 0x3a, 0xd4, 0xa0, 0x23, 0x71, 0x10, 0xe6,
+	0x6a, 0x70, 0x7e, 0xb6, 0x9e, 0x6d, 0xd3, 0x3a, 0x1d, 0x4d, 0xb0, 0xe4, 0xa0, 0x67, 0x33, 0x47,
+	0xf9, 0x93, 0x37, 0x5c, 0x98, 0xb1, 0x87, 0xf9, 0xc7, 0xb0, 0x60, 0xba, 0xd6, 0x11, 0x71, 0x35,
+	0x83, 0x3a, 0xfb, 0xd6, 0x40, 0x1e, 0x72, 0xa5, 0xd8, 0x7c, 0x93, 0x03, 0x71, 0x51, 0x08, 0xd4,
+	0x39, 0xfe, 0x0b, 0x1c, 0xe3, 0xa5, 0xe7, 0x50, 0x8c, 0x2e, 0x75, 0x76, 0x2e, 0x79, 0xd6, 0xaf,
+	0x11, 0x99, 0x58, 0xf2, 0x34, 0x14, 0xe7, 0x19, 0x45, 0xa4, 0x95, 0xef, 0x41, 0x7a, 0x48, 0x4d,
+	0xa1, 0x67, 0xa1, 0x76, 0x83, 0x65, 0x13, 0xff, 0x72, 0xb6, 0x5e, 0xa0, 0x5e, 0x65, 0xc3, 0xb2,
+	0xc9, 0x0e, 0x35, 0x09, 0xe6, 0x00, 0xf5, 0x08, 0xd2, 0x2c, 0xe6, 0xa0, 0x2f, 0x43, 0xba, 0xd6,
+	0x6a, 0x37, 0x94, 0xb9, 0xd2, 0xf2, 0xc9, 0x69, 0x79, 0x81, 0xbb, 0x84, 0x31, 0xd8, 0xda, 0x45,
+	0xeb, 0x90, 0x7d, 0xde, 0xd9, 0xde, 0xdd, 0x61, 0xcb, 0xeb, 0xc6, 0xc9, 0x69, 0x79, 0x29, 0x64,
+	0x0b, 0xa7, 0xa1, 0x3b, 0x90, 0xe9, 0xef, 0x74, 0x37, 0x7a, 0x4a, 0xb2, 0x84, 0x4e, 0x4e, 0xcb,
+	0x8b, 0x21, 0x9f, 0xdb, 0x5c, 0x5a, 0x96, 0xb3, 0x9a, 0x0f, 0xe9, 0xea, 0x8f, 0x12, 0x50, 0x88,
+	0x6c, 0x38, 0xb6, 0x30, 0x1b, 0xcd, 0x8d, 0xea, 0xee, 0x76, 0x5f, 0x99, 0x8b, 0x2c, 0xcc, 0x08,
+	0xa4, 0x41, 0xf6, 0xf5, 0xb1, 0xcd, 0xe2, 0x1c, 0xd4, 0x3b, 0xed, 0x5e, 0xab, 0xd7, 0x6f, 0xb6,
+	0xfb, 0x4a, 0xa2, 0xb4, 0x7a, 0x72, 0x5a, 0x5e, 0x99, 0x05, 0x6f, 0x8c, 0x6d, 0x9b, 0x2d, 0xcd,
+	0x7a, 0xb5, 0xbe, 0xc5, 0xd7, 0xfa, 0x74, 0x69, 0x46, 0x50, 0x75, 0xdd, 0x38, 0x20, 0x26, 0xba,
+	0x0f, 0xf9, 0x46, 0x73, 0xbb, 0xb9, 0x59, 0xe5, 0xd1, 0xbd, 0x74, 0xe7, 0xe4, 0xb4, 0x7c, 0xeb,
+	0xf5, 0xde, 0x6d, 0x32, 0xd0, 0x7d, 0x62, 0xce, 0x2c, 0xd1, 0x08, 0x44, 0xfd, 0x59, 0x12, 0x16,
+	0x30, 0x2b, 0x87, 0x5d, 0xbf, 0x4b, 0x6d, 0xcb, 0x98, 0xa0, 0x2e, 0xe4, 0x0d, 0xea, 0x98, 0x56,
+	0x24, 0x4e, 0x3c, 0xbe, 0x24, 0x25, 0x9a, 0x4a, 0x05, 0xad, 0x7a, 0x20, 0x89, 0xa7, 0x4a, 0xd0,
+	0x03, 0xc8, 0x98, 0xc4, 0xd6, 0x27, 0x32, 0x37, 0xbb, 0x55, 0x11, 0x05, 0x77, 0x25, 0x28, 0xb8,
+	0x2b, 0x0d, 0x59, 0x70, 0x63, 0x81, 0xe3, 0x35, 0x88, 0xfe, 0x52, 0xd3, 0x7d, 0x9f, 0x0c, 0x47,
+	0xbe, 0x48, 0xcc, 0xd2, 0xb8, 0x30, 0xd4, 0x5f, 0x56, 0x25, 0x09, 0x3d, 0x82, 0xec, 0xb1, 0xe5,
+	0x98, 0xf4, 0x58, 0xe6, 0x5e, 0x57, 0x28, 0x95, 0x40, 0xf5, 0x84, 0xa5, 0x24, 0x33, 0x66, 0xb2,
+	0x35, 0xd4, 0xee, 0xb4, 0x9b, 0xc1, 0x1a, 0x92, 0xfc, 0x8e, 0xd3, 0xa6, 0x0e, 0xdb, 0xff, 0xd0,
+	0x69, 0x6b, 0x1b, 0xd5, 0xd6, 0xf6, 0x2e, 0x66, 0xeb, 0x68, 0xe5, 0xe4, 0xb4, 0xac, 0x84, 0x90,
+	0x0d, 0xdd, 0xb2, 0x59, 0x31, 0x70, 0x0b, 0x52, 0xd5, 0xf6, 0xf7, 0x94, 0x64, 0x49, 0x39, 0x39,
+	0x2d, 0x17, 0x43, 0x76, 0xd5, 0x99, 0x4c, 0xfd, 0x3e, 0xdb, 0xaf, 0xfa, 0xf7, 0x29, 0x28, 0xee,
+	0x8e, 0x4c, 0xdd, 0x27, 0x62, 0x9f, 0xa1, 0x32, 0x14, 0x46, 0xba, 0xab, 0xdb, 0x36, 0xb1, 0x2d,
+	0x6f, 0x28, 0xaf, 0x12, 0xa2, 0x24, 0xf4, 0xc1, 0x9b, 0xba, 0xb1, 0x96, 0x63, 0x7b, 0xe7, 0x07,
+	0xff, 0xb6, 0x9e, 0x08, 0x1c, 0xba, 0x0b, 0x8b, 0xfb, 0xc2, 0x5a, 0x4d, 0x37, 0xf8, 0xc4, 0xa6,
+	0xf8, 0xc4, 0x56, 0xe2, 0x26, 0x36, 0x6a, 0x56, 0x45, 0x0e, 0xb2, 0xca, 0xa5, 0xf0, 0xc2, 0x7e,
+	0xb4, 0x89, 0x9e, 0xc0, 0xfc, 0x90, 0x3a, 0x96, 0x4f, 0xdd, 0xeb, 0x67, 0x21, 0x40, 0xa2, 0x7b,
+	0xb0, 0xcc, 0x26, 0x37, 0xb0, 0x87, 0xb3, 0xf9, 0x71, 0x9e, 0xc4, 0x4b, 0x43, 0xfd, 0xa5, 0xec,
+	0x10, 0x33, 0x32, 0xaa, 0x41, 0x86, 0xba, 0x2c, 0x5f, 0xcc, 0x72, 0x73, 0xef, 0x5f, 0x6b, 0xae,
+	0x68, 0x74, 0x98, 0x0c, 0x16, 0xa2, 0xea, 0x37, 0x61, 0xe1, 0xc2, 0x20, 0x58, 0x9a, 0xd4, 0xad,
+	0xee, 0xf6, 0x9a, 0xca, 0x1c, 0x2a, 0x42, 0xae, 0xde, 0x69, 0xf7, 0x5b, 0xed, 0x5d, 0x96, 0xe7,
+	0x15, 0x21, 0x87, 0x3b, 0xdb, 0xdb, 0xb5, 0x6a, 0xfd, 0x99, 0x92, 0x54, 0x2b, 0x50, 0x88, 0x68,
+	0x43, 0x8b, 0x00, 0xbd, 0x7e, 0xa7, 0xab, 0x6d, 0xb4, 0x70, 0xaf, 0x2f, 0xb2, 0xc4, 0x5e, 0xbf,
+	0x8a, 0xfb, 0x92, 0x90, 0x50, 0xff, 0x33, 0x19, 0xcc, 0xa8, 0x4c, 0x0c, 0x6b, 0x17, 0x13, 0xc3,
+	0x2b, 0x8c, 0x97, 0xa9, 0xe1, 0xb4, 0x11, 0x26, 0x88, 0x1f, 0x00, 0xf0, 0x85, 0x43, 0x4c, 0x4d,
+	0xf7, 0xe5, 0xc4, 0x97, 0x5e, 0x73, 0x72, 0x3f, 0xb8, 0xd1, 0xc2, 0x79, 0x89, 0xae, 0xfa, 0xe8,
+	0x5b, 0x50, 0x34, 0xe8, 0x70, 0x64, 0x13, 0x29, 0x9c, 0xba, 0x56, 0xb8, 0x10, 0xe2, 0xab, 0x7e,
+	0x34, 0x35, 0x4d, 0x5f, 0x4c, 0x9e, 0x7f, 0x3b, 0x11, 0x78, 0x26, 0x26, 0x1b, 0x2d, 0x42, 0x6e,
+	0xb7, 0xdb, 0xa8, 0xf6, 0x5b, 0xed, 0x4d, 0x25, 0x81, 0x00, 0xb2, 0xdc, 0xd5, 0x0d, 0x25, 0xc9,
+	0xb2, 0xe8, 0x7a, 0x67, 0xa7, 0xbb, 0xdd, 0xe4, 0x11, 0x0b, 0xad, 0x80, 0x12, 0x38, 0x5b, 0xe3,
+	0x8e, 0x6c, 0x36, 0x94, 0x34, 0xba, 0x01, 0x4b, 0x21, 0x55, 0x4a, 0x66, 0xd0, 0x4d, 0x40, 0x21,
+	0x71, 0xaa, 0x22, 0xab, 0xfe, 0x06, 0x2c, 0xd5, 0xa9, 0xe3, 0xeb, 0x96, 0x13, 0x56, 0x18, 0x8f,
+	0xd9, 0xa0, 0x25, 0x49, 0xb3, 0xe4, 0x4d, 0x50, 0x6d, 0xe9, 0xfc, 0x6c, 0xbd, 0x10, 0x42, 0x5b,
+	0x0d, 0x9e, 0x2a, 0xc9, 0x86, 0xc9, 0xf6, 0xef, 0xc8, 0x32, 0xb9, 0x73, 0x33, 0xb5, 0xf9, 0xf3,
+	0xb3, 0xf5, 0x54, 0xb7, 0xd5, 0xc0, 0x8c, 0x86, 0xbe, 0x0c, 0x79, 0xf2, 0xd2, 0xf2, 0x35, 0x83,
+	0x9d, 0x4b, 0xcc, 0x81, 0x19, 0x9c, 0x63, 0x84, 0x3a, 0x3b, 0x86, 0x6a, 0x00, 0x5d, 0xea, 0xfa,
+	0xb2, 0xe7, 0xaf, 0x43, 0x66, 0x44, 0x5d, 0x7e, 0x77, 0x71, 0xe9, 0x8d, 0x1a, 0x83, 0x8b, 0x85,
+	0x8a, 0x05, 0x58, 0xfd, 0x83, 0x14, 0x40, 0x5f, 0xf7, 0x0e, 0xa5, 0x92, 0xa7, 0x90, 0x0f, 0x6f,
+	0x27, 0xe5, 0x25, 0xc8, 0x95, 0xb3, 0x1d, 0x82, 0xd1, 0x93, 0x60, 0xb1, 0x89, 0xda, 0x29, 0xb6,
+	0x88, 0x0d, 0x3a, 0x8a, 0x2b, 0x3f, 0x2e, 0x16, 0x48, 0xec, 0x98, 0x27, 0xae, 0x2b, 0x67, 0x9e,
+	0xfd, 0x44, 0x75, 0x7e, 0x2c, 0x08, 0xa7, 0xc9, 0xec, 0x3b, 0xf6, 0xda, 0x67, 0x66, 0x46, 0xb6,
+	0xe6, 0xf0, 0x54, 0x0e, 0x7d, 0x0c, 0x05, 0x36, 0x6e, 0xcd, 0xe3, 0x3c, 0x99, 0x78, 0x5f, 0xea,
+	0x2a, 0xa1, 0x01, 0xc3, 0x68, 0xea, 0xe5, 0x3b, 0x00, 0xfa, 0x68, 0x64, 0x5b, 0xc4, 0xd4, 0xf6,
+	0x26, 0x3c, 0xd3, 0xce, 0xe3, 0xbc, 0xa4, 0xd4, 0x26, 0x6c, 0xbb, 0x04, 0x6c, 0xdd, 0xe7, 0xd9,
+	0xf3, 0x35, 0x0e, 0x94, 0xe8, 0xaa, 0x5f, 0x53, 0x60, 0xd1, 0x1d, 0x3b, 0xcc, 0xa1, 0xd2, 0x3a,
+	0xf5, 0xcf, 0x93, 0xf0, 0x56, 0x9b, 0xf8, 0xc7, 0xd4, 0x3d, 0xac, 0xfa, 0xbe, 0x6e, 0x1c, 0x0c,
+	0x89, 0x23, 0xa7, 0x2f, 0x52, 0xd0, 0x24, 0x2e, 0x14, 0x34, 0xab, 0x30, 0xaf, 0xdb, 0x96, 0xee,
+	0x11, 0x91, 0xbc, 0xe5, 0x71, 0xd0, 0x64, 0x65, 0x17, 0x2b, 0xe2, 0x88, 0xe7, 0x11, 0x71, 0xaf,
+	0xc2, 0x0c, 0x0f, 0x08, 0xe8, 0xfb, 0x70, 0x53, 0xa6, 0x69, 0x7a, 0xd8, 0x15, 0x2b, 0x28, 0x82,
+	0x0b, 0xda, 0x66, 0x6c, 0x55, 0x19, 0x6f, 0x9c, 0xcc, 0xe3, 0xa6, 0xe4, 0xce, 0xc8, 0x97, 0x59,
+	0xe1, 0x8a, 0x19, 0xc3, 0x2a, 0x6d, 0xc2, 0xad, 0x4b, 0x45, 0x3e, 0xd7, 0xbd, 0xcd, 0x3f, 0x25,
+	0x01, 0x5a, 0xdd, 0xea, 0x8e, 0x74, 0x52, 0x03, 0xb2, 0xfb, 0xfa, 0xd0, 0xb2, 0x27, 0x57, 0x45,
+	0xc0, 0x29, 0xbe, 0x52, 0x15, 0xee, 0xd8, 0xe0, 0x32, 0x58, 0xca, 0xf2, 0x9a, 0x72, 0xbc, 0xe7,
+	0x10, 0x3f, 0xac, 0x29, 0x79, 0x8b, 0x99, 0xe1, 0xea, 0x4e, 0xb8, 0x74, 0x45, 0x83, 0x4d, 0x00,
+	0x4b, 0x79, 0x8e, 0xf5, 0x49, 0x10, 0xb6, 0x64, 0x13, 0x6d, 0xf1, 0xdb, 0x51, 0xe2, 0x1e, 0x11,
+	0x73, 0x35, 0xc3, 0x9d, 0x7a, 0x9d, 0x3d, 0x58, 0xc2, 0x85, 0xef, 0x42, 0xe9, 0xd2, 0x47, 0x3c,
+	0x65, 0x9a, 0xb2, 0x3e, 0x97, 0x8f, 0x1e, 0xc2, 0xc2, 0x85, 0x71, 0xbe, 0x56, 0xcc, 0xb7, 0xba,
+	0xcf, 0xbf, 0xae, 0xa4, 0xe5, 0xaf, 0x6f, 0x2a, 0x59, 0xf5, 0x4f, 0x53, 0x22, 0xd0, 0x48, 0xaf,
+	0xc6, 0xbf, 0x0a, 0xe4, 0xf8, 0xea, 0x36, 0xa8, 0x2d, 0x03, 0xc0, 0x7b, 0x57, 0xc7, 0x1f, 0x56,
+	0xd3, 0x71, 0x38, 0x0e, 0x05, 0xd1, 0x3a, 0x14, 0xc4, 0x2a, 0xd6, 0xd8, 0x86, 0xe3, 0x6e, 0x5d,
+	0xc0, 0x20, 0x48, 0x4c, 0x12, 0xdd, 0x85, 0x45, 0x7e, 0xf9, 0xe3, 0x1d, 0x10, 0x53, 0x60, 0xd2,
+	0x1c, 0xb3, 0x10, 0x52, 0x39, 0x6c, 0x07, 0x8a, 0x92, 0xa0, 0xf1, 0x7c, 0x3e, 0xc3, 0x0d, 0xba,
+	0x77, 0x9d, 0x41, 0x42, 0x84, 0xa7, 0xf9, 0x85, 0xd1, 0xb4, 0xa1, 0x36, 0x20, 0x17, 0x18, 0x8b,
+	0x56, 0x21, 0xd5, 0xaf, 0x77, 0x95, 0xb9, 0xd2, 0xd2, 0xc9, 0x69, 0xb9, 0x10, 0x90, 0xfb, 0xf5,
+	0x2e, 0xe3, 0xec, 0x36, 0xba, 0x4a, 0xe2, 0x22, 0x67, 0xb7, 0xd1, 0x2d, 0xa5, 0x59, 0x0e, 0xa6,
+	0xee, 0x43, 0x21, 0xd2, 0x03, 0x7a, 0x07, 0xe6, 0x5b, 0xed, 0x4d, 0xdc, 0xec, 0xf5, 0x94, 0xb9,
+	0xd2, 0xcd, 0x93, 0xd3, 0x32, 0x8a, 0x70, 0x5b, 0xce, 0x80, 0xcd, 0x0f, 0xba, 0x03, 0xe9, 0xad,
+	0x0e, 0x3b, 0xdb, 0x45, 0x01, 0x11, 0x41, 0x6c, 0x51, 0xcf, 0x2f, 0xdd, 0x90, 0xc9, 0x5d, 0x54,
+	0xb1, 0xfa, 0x87, 0x09, 0xc8, 0x8a, 0xcd, 0x14, 0x3b, 0x51, 0x55, 0x98, 0x0f, 0xae, 0x09, 0x44,
+	0x71, 0xf7, 0xde, 0xe5, 0x85, 0x58, 0x45, 0xd6, 0x4d, 0x62, 0xf9, 0x05, 0x72, 0xa5, 0x0f, 0xa1,
+	0x18, 0x65, 0x7c, 0xae, 0xc5, 0xf7, 0x7d, 0x28, 0xb0, 0xf5, 0x1d, 0x14, 0x64, 0x8f, 0x21, 0x2b,
+	0x02, 0x42, 0x78, 0xd6, 0x5c, 0x5e, 0x15, 0x4a, 0x24, 0x7a, 0x0a, 0xf3, 0xa2, 0x92, 0x0c, 0x6e,
+	0x87, 0xd7, 0xae, 0xde, 0x45, 0x38, 0x80, 0xab, 0x1f, 0x43, 0xba, 0x4b, 0x88, 0xcb, 0x7c, 0xef,
+	0x50, 0x93, 0x4c, 0x8f, 0x67, 0x59, 0x04, 0x9b, 0xa4, 0xd5, 0x60, 0x45, 0xb0, 0x49, 0x5a, 0x66,
+	0x78, 0xff, 0x95, 0x8c, 0xdc, 0x7f, 0xf5, 0xa1, 0xf8, 0x82, 0x58, 0x83, 0x03, 0x9f, 0x98, 0x5c,
+	0xd1, 0x7d, 0x48, 0x8f, 0x48, 0x68, 0xfc, 0x6a, 0xec, 0x02, 0x23, 0xc4, 0xc5, 0x1c, 0xc5, 0xe2,
+	0xc8, 0x31, 0x97, 0x96, 0x4f, 0x1a, 0xb2, 0xa5, 0xfe, 0x63, 0x12, 0x16, 0x5b, 0x9e, 0x37, 0xd6,
+	0x1d, 0x23, 0xc8, 0xdc, 0xbe, 0x7d, 0x31, 0x73, 0x8b, 0x7d, 0xfb, 0xb9, 0x28, 0x72, 0xf1, 0x5a,
+	0x4f, 0x9e, 0x9e, 0xc9, 0xf0, 0xf4, 0x54, 0x7f, 0x9a, 0x08, 0xee, 0xee, 0xee, 0x46, 0xb6, 0xbb,
+	0xa8, 0x03, 0xa3, 0x9a, 0xc8, 0xae, 0x73, 0xe8, 0xd0, 0x63, 0x07, 0xbd, 0x0d, 0x19, 0xdc, 0x6c,
+	0x37, 0x5f, 0x28, 0x09, 0xb1, 0x3c, 0x2f, 0x80, 0x30, 0x71, 0xc8, 0x31, 0xd3, 0xd4, 0x6d, 0xb6,
+	0x1b, 0x2c, 0xd3, 0x4a, 0xc6, 0x68, 0xea, 0x12, 0xc7, 0xb4, 0x9c, 0x01, 0x7a, 0x07, 0xb2, 0xad,
+	0x5e, 0x6f, 0x97, 0x97, 0x89, 0x6f, 0x9d, 0x9c, 0x96, 0x6f, 0x5c, 0x40, 0xf1, 0x7b, 0x5b, 0x93,
+	0x81, 0x58, 0x99, 0xc3, 0x72, 0xb0, 0x18, 0x10, 0xcb, 0x9f, 0x05, 0x08, 0x77, 0xfa, 0xd5, 0x7e,
+	0x53, 0xc9, 0xc4, 0x80, 0x30, 0x65, 0x7f, 0xe5, 0x76, 0xfb, 0xd7, 0x24, 0x28, 0x55, 0xc3, 0x20,
+	0x23, 0x9f, 0xf1, 0x65, 0x65, 0xd9, 0x87, 0xdc, 0x88, 0xfd, 0xb2, 0x48, 0x90, 0x25, 0x3d, 0x8d,
+	0x7d, 0xbd, 0x9c, 0x91, 0xab, 0x60, 0x6a, 0x93, 0xaa, 0x39, 0xb4, 0x3c, 0xcf, 0xa2, 0x8e, 0xa0,
+	0xe1, 0x50, 0x53, 0xe9, 0xbf, 0x12, 0x70, 0x23, 0x06, 0x81, 0x1e, 0x42, 0xda, 0xa5, 0x76, 0x30,
+	0x87, 0xb7, 0x2f, 0xbb, 0x96, 0x65, 0xa2, 0x98, 0x23, 0xd1, 0x1a, 0x80, 0x3e, 0xf6, 0xa9, 0xce,
+	0xfb, 0xe7, 0xb3, 0x97, 0xc3, 0x11, 0x0a, 0x7a, 0x01, 0x59, 0x8f, 0x18, 0x2e, 0x09, 0x72, 0xe9,
+	0x8f, 0xff, 0xbf, 0xd6, 0x57, 0x7a, 0x5c, 0x0d, 0x96, 0xea, 0x4a, 0x15, 0xc8, 0x0a, 0x0a, 0x5b,
+	0xf6, 0xa6, 0xee, 0xeb, 0xf2, 0xd2, 0x9e, 0xff, 0x66, 0xab, 0x49, 0xb7, 0x07, 0xc1, 0x6a, 0xd2,
+	0xed, 0x81, 0xfa, 0x77, 0x49, 0x80, 0xe6, 0x4b, 0x9f, 0xb8, 0x8e, 0x6e, 0xd7, 0xab, 0xa8, 0x19,
+	0x89, 0xfe, 0x62, 0xb4, 0x5f, 0x8d, 0x7d, 0x89, 0x08, 0x25, 0x2a, 0xf5, 0x6a, 0x4c, 0xfc, 0xbf,
+	0x05, 0xa9, 0xb1, 0x2b, 0x1f, 0xa4, 0x45, 0x1e, 0xbc, 0x8b, 0xb7, 0x31, 0xa3, 0xa1, 0xe6, 0x34,
+	0x6c, 0xa5, 0x2e, 0x7f, 0x76, 0x8e, 0x74, 0x10, 0x1b, 0xba, 0xd8, 0xce, 0x37, 0x74, 0xcd, 0x20,
+	0xf2, 0xe4, 0x28, 0x8a, 0x9d, 0x5f, 0xaf, 0xd6, 0x89, 0xeb, 0xe3, 0xac, 0xa1, 0xb3, 0xff, 0x5f,
+	0x28, 0xbe, 0xdd, 0x07, 0x98, 0x0e, 0x0d, 0xad, 0x41, 0xa6, 0xbe, 0xd1, 0xeb, 0x6d, 0x2b, 0x73,
+	0x22, 0x80, 0x4f, 0x59, 0x9c, 0xac, 0xfe, 0x75, 0x12, 0x72, 0xf5, 0xaa, 0x3c, 0x56, 0xeb, 0xa0,
+	0xf0, 0xa8, 0xc4, 0x9f, 0x3a, 0xc8, 0xcb, 0x91, 0xe5, 0x4e, 0x64, 0x60, 0xb9, 0xa2, 0xa8, 0x5d,
+	0x64, 0x22, 0xcc, 0xea, 0x26, 0x17, 0x40, 0x18, 0x8a, 0x44, 0x3a, 0x41, 0x33, 0xf4, 0x20, 0xc6,
+	0xaf, 0x5d, 0xed, 0x2c, 0x51, 0x9e, 0x4c, 0xdb, 0x1e, 0x2e, 0x04, 0x4a, 0xea, 0xba, 0x87, 0x3e,
+	0x80, 0x25, 0xcf, 0x1a, 0x38, 0x96, 0x33, 0xd0, 0x02, 0xe7, 0xf1, 0x77, 0x97, 0xda, 0xf2, 0xf9,
+	0xd9, 0xfa, 0x42, 0x4f, 0xb0, 0xa4, 0x0f, 0x17, 0x24, 0xb2, 0xce, 0x5d, 0x89, 0xbe, 0x09, 0x8b,
+	0x11, 0x51, 0xe6, 0x45, 0xe1, 0x76, 0xe5, 0xfc, 0x6c, 0xbd, 0x18, 0x4a, 0x3e, 0x23, 0x13, 0x5c,
+	0x0c, 0x05, 0x9f, 0x11, 0x7e, 0xff, 0xb2, 0x4f, 0x5d, 0x83, 0x68, 0x2e, 0xdf, 0xd3, 0xfc, 0x04,
+	0x4f, 0xe3, 0x02, 0xa7, 0x89, 0x6d, 0xae, 0x3e, 0x87, 0x1b, 0x1d, 0xd7, 0x38, 0x20, 0x9e, 0x2f,
+	0x5c, 0x21, 0xbd, 0xf8, 0x31, 0xdc, 0xf6, 0x75, 0xef, 0x50, 0x3b, 0xb0, 0x3c, 0x9f, 0xba, 0x13,
+	0xcd, 0x25, 0x3e, 0x71, 0x18, 0x5f, 0xe3, 0x8f, 0xb5, 0xf2, 0xd2, 0xef, 0x16, 0xc3, 0x6c, 0x09,
+	0x08, 0x0e, 0x10, 0xdb, 0x0c, 0xa0, 0xb6, 0xa0, 0xc8, 0xca, 0x14, 0x79, 0x71, 0xc6, 0x46, 0x0f,
+	0x36, 0x1d, 0x68, 0x6f, 0x7c, 0x4c, 0xe5, 0x6d, 0x3a, 0x10, 0x3f, 0xd5, 0xef, 0x82, 0xd2, 0xb0,
+	0xbc, 0x91, 0xee, 0x1b, 0x07, 0xc1, 0x6d, 0x26, 0x6a, 0x80, 0x72, 0x40, 0x74, 0xd7, 0xdf, 0x23,
+	0xba, 0xaf, 0x8d, 0x88, 0x6b, 0x51, 0xf3, 0xfa, 0x59, 0x5e, 0x0a, 0x45, 0xba, 0x5c, 0x42, 0xfd,
+	0xef, 0x04, 0x00, 0xd6, 0xf7, 0x83, 0x8c, 0xec, 0x6b, 0xb0, 0xec, 0x39, 0xfa, 0xc8, 0x3b, 0xa0,
+	0xbe, 0x66, 0x39, 0x3e, 0x71, 0x8f, 0x74, 0x5b, 0x5e, 0xe0, 0x28, 0x01, 0xa3, 0x25, 0xe9, 0xe8,
+	0x3e, 0xa0, 0x43, 0x42, 0x46, 0x1a, 0xb5, 0x4d, 0x2d, 0x60, 0x8a, 0xa7, 0xe4, 0x34, 0x56, 0x18,
+	0xa7, 0x63, 0x9b, 0xbd, 0x80, 0x8e, 0x6a, 0xb0, 0xc6, 0x86, 0x4f, 0x1c, 0xdf, 0xb5, 0x88, 0xa7,
+	0xed, 0x53, 0x57, 0xf3, 0x6c, 0x7a, 0xac, 0xed, 0x53, 0xdb, 0xa6, 0xc7, 0xc4, 0x0d, 0xee, 0xc6,
+	0x4a, 0x36, 0x1d, 0x34, 0x05, 0x68, 0x83, 0xba, 0x3d, 0x9b, 0x1e, 0x6f, 0x04, 0x08, 0x96, 0xb6,
+	0x4d, 0xc7, 0xec, 0x5b, 0xc6, 0x61, 0x90, 0xb6, 0x85, 0xd4, 0xbe, 0x65, 0x1c, 0xa2, 0x77, 0x60,
+	0x81, 0xd8, 0x84, 0x5f, 0x91, 0x08, 0x54, 0x86, 0xa3, 0x8a, 0x01, 0x91, 0x81, 0xd4, 0x4f, 0x40,
+	0x69, 0x3a, 0x86, 0x3b, 0x19, 0x45, 0xe6, 0xfc, 0x3e, 0x20, 0x16, 0x24, 0x35, 0x9b, 0x1a, 0x87,
+	0xda, 0x50, 0x77, 0xf4, 0x01, 0xb3, 0x4b, 0xbc, 0xf0, 0x29, 0x8c, 0xb3, 0x4d, 0x8d, 0xc3, 0x1d,
+	0x49, 0x57, 0x3f, 0x00, 0xe8, 0x8d, 0x5c, 0xa2, 0x9b, 0x1d, 0x96, 0x4d, 0x30, 0xd7, 0xf1, 0x96,
+	0x66, 0xca, 0x17, 0x52, 0xea, 0xca, 0xad, 0xae, 0x08, 0x46, 0x23, 0xa4, 0xab, 0xbf, 0x0c, 0x37,
+	0xba, 0xb6, 0x6e, 0xf0, 0xaf, 0x05, 0xba, 0xe1, 0x93, 0x15, 0x7a, 0x0a, 0x59, 0x01, 0x95, 0x33,
+	0x19, 0xbb, 0xdd, 0xa6, 0x7d, 0x6e, 0xcd, 0x61, 0x89, 0xaf, 0x15, 0x01, 0xa6, 0x7a, 0xd4, 0xbf,
+	0x4c, 0x40, 0x3e, 0xd4, 0x8f, 0xca, 0xe2, 0x25, 0xc6, 0x77, 0x75, 0xcb, 0x91, 0x55, 0x7d, 0x1e,
+	0x47, 0x49, 0xa8, 0x05, 0x85, 0x51, 0x28, 0x7d, 0x65, 0x3e, 0x17, 0x63, 0x35, 0x8e, 0xca, 0xa2,
+	0x0f, 0x21, 0x1f, 0x3c, 0x49, 0x07, 0x11, 0xf6, 0xea, 0x17, 0xec, 0x29, 0x5c, 0xfd, 0x36, 0xc0,
+	0x77, 0xa8, 0xe5, 0xf4, 0xe9, 0x21, 0x71, 0xf8, 0x13, 0x2b, 0xab, 0x09, 0x49, 0xe0, 0x45, 0xd9,
+	0xe2, 0xa5, 0xbe, 0x98, 0x82, 0xf0, 0xa5, 0x51, 0x34, 0xd5, 0xbf, 0x4d, 0x42, 0x16, 0x53, 0xea,
+	0xd7, 0xab, 0xa8, 0x0c, 0x59, 0x19, 0x27, 0xf8, 0xf9, 0x53, 0xcb, 0x9f, 0x9f, 0xad, 0x67, 0x44,
+	0x80, 0xc8, 0x18, 0x3c, 0x32, 0x44, 0x22, 0x78, 0xf2, 0xb2, 0x08, 0x8e, 0x1e, 0x42, 0x51, 0x82,
+	0xb4, 0x03, 0xdd, 0x3b, 0x10, 0x05, 0x5a, 0x6d, 0xf1, 0xfc, 0x6c, 0x1d, 0x04, 0x72, 0x4b, 0xf7,
+	0x0e, 0x30, 0x08, 0x34, 0xfb, 0x8d, 0x9a, 0x50, 0xf8, 0x94, 0x5a, 0x8e, 0xe6, 0xf3, 0x41, 0xc8,
+	0xcb, 0xc4, 0xd8, 0x79, 0x9c, 0x0e, 0x55, 0x7e, 0x6f, 0x00, 0x9f, 0x4e, 0x07, 0xdf, 0x84, 0x05,
+	0x97, 0x52, 0x5f, 0x84, 0x2d, 0x8b, 0x3a, 0xf2, 0x9e, 0xa2, 0x1c, 0x7b, 0x7d, 0x4d, 0xa9, 0x8f,
+	0x25, 0x0e, 0x17, 0xdd, 0x48, 0x0b, 0x3d, 0x84, 0x15, 0x5b, 0xf7, 0x7c, 0x8d, 0xc7, 0x3b, 0x73,
+	0xaa, 0x2d, 0xcb, 0xb7, 0x1a, 0x62, 0xbc, 0x0d, 0xce, 0x0a, 0x24, 0xd4, 0x7f, 0x4e, 0x40, 0x81,
+	0x0d, 0xc6, 0xda, 0xb7, 0x0c, 0x96, 0xe4, 0x7d, 0xfe, 0xdc, 0xe3, 0x16, 0xa4, 0x0c, 0xcf, 0x95,
+	0x4e, 0xe5, 0x87, 0x6f, 0xbd, 0x87, 0x31, 0xa3, 0xa1, 0x4f, 0x20, 0x2b, 0xef, 0x4b, 0x44, 0xda,
+	0xa1, 0x5e, 0x9f, 0x8e, 0x4a, 0xdf, 0x48, 0x39, 0xbe, 0x96, 0xa7, 0xd6, 0x89, 0x43, 0x00, 0x47,
+	0x49, 0xe8, 0x26, 0x24, 0x0d, 0xe1, 0x2e, 0xf9, 0x41, 0x4b, 0xbd, 0x8d, 0x93, 0x86, 0xa3, 0xfe,
+	0x28, 0x01, 0x0b, 0xd3, 0x0d, 0xcf, 0x56, 0xc0, 0x6d, 0xc8, 0x7b, 0xe3, 0x3d, 0x6f, 0xe2, 0xf9,
+	0x64, 0x18, 0x3c, 0x1f, 0x87, 0x04, 0xd4, 0x82, 0xbc, 0x6e, 0x0f, 0xa8, 0x6b, 0xf9, 0x07, 0x43,
+	0x59, 0x89, 0xc6, 0xa7, 0x0a, 0x51, 0x9d, 0x95, 0x6a, 0x20, 0x82, 0xa7, 0xd2, 0xc1, 0xb9, 0x2f,
+	0xbe, 0x31, 0xe0, 0xe7, 0xfe, 0xdb, 0x50, 0xb4, 0xf5, 0x21, 0xbf, 0x40, 0xf2, 0xad, 0xa1, 0x18,
+	0x47, 0x1a, 0x17, 0x24, 0xad, 0x6f, 0x0d, 0x89, 0xaa, 0x42, 0x3e, 0x54, 0x86, 0x96, 0xa0, 0x50,
+	0x6d, 0xf6, 0xb4, 0x47, 0x8f, 0x9f, 0x6a, 0x9b, 0xf5, 0x1d, 0x65, 0x4e, 0xe6, 0xa6, 0x7f, 0x95,
+	0x80, 0x05, 0x19, 0x8e, 0x64, 0xbe, 0xff, 0x0e, 0xcc, 0xbb, 0xfa, 0xbe, 0x1f, 0x54, 0x24, 0x69,
+	0xb1, 0xaa, 0x59, 0x84, 0x67, 0x15, 0x09, 0x63, 0xc5, 0x57, 0x24, 0x91, 0x0f, 0x1a, 0x52, 0x57,
+	0x7e, 0xd0, 0x90, 0xfe, 0xb9, 0x7c, 0xd0, 0xa0, 0xfe, 0x26, 0xc0, 0x86, 0x65, 0x93, 0xbe, 0xb8,
+	0x6b, 0x8a, 0xab, 0x2f, 0x59, 0x0e, 0x27, 0xef, 0x32, 0x83, 0x1c, 0xae, 0xd5, 0xc0, 0x8c, 0xc6,
+	0x58, 0x03, 0xcb, 0x94, 0x9b, 0x91, 0xb3, 0x36, 0x19, 0x6b, 0x60, 0x99, 0xe1, 0xcb, 0x5b, 0xfa,
+	0xba, 0x97, 0xb7, 0xd3, 0x04, 0x2c, 0xc9, 0xdc, 0x35, 0x0c, 0xbf, 0x5f, 0x85, 0xbc, 0x48, 0x63,
+	0xa7, 0x05, 0x1d, 0x7f, 0xc4, 0x17, 0xb8, 0x56, 0x03, 0xe7, 0x04, 0xbb, 0x65, 0xa2, 0x75, 0x28,
+	0x48, 0x68, 0xe4, 0xe3, 0x27, 0x10, 0xa4, 0x36, 0x33, 0xff, 0xeb, 0x90, 0xde, 0xb7, 0x6c, 0x22,
+	0x17, 0x7a, 0x6c, 0x00, 0x98, 0x3a, 0x60, 0x6b, 0x0e, 0x73, 0x74, 0x2d, 0x17, 0x5c, 0xc6, 0x71,
+	0xfb, 0x64, 0xd9, 0x19, 0xb5, 0x4f, 0x54, 0xa0, 0x33, 0xf6, 0x09, 0x1c, 0xb3, 0x4f, 0xb0, 0x85,
+	0x7d, 0x12, 0x1a, 0xb5, 0x4f, 0x90, 0x7e, 0x2e, 0xf6, 0x6d, 0xc3, 0xcd, 0x9a, 0xad, 0x1b, 0x87,
+	0xb6, 0xe5, 0xf9, 0xc4, 0x8c, 0x46, 0x8c, 0xc7, 0x90, 0xbd, 0x90, 0x74, 0x5e, 0x75, 0x6b, 0x29,
+	0x91, 0xea, 0x7f, 0x24, 0xa0, 0xb8, 0x45, 0x74, 0xdb, 0x3f, 0x98, 0x5e, 0x0d, 0xf9, 0xc4, 0xf3,
+	0xe5, 0x61, 0xc5, 0x7f, 0xa3, 0x6f, 0x40, 0x2e, 0xcc, 0x49, 0xae, 0x7d, 0x7f, 0x0b, 0xa1, 0xe8,
+	0x09, 0xcc, 0xb3, 0x3d, 0x46, 0xc7, 0x41, 0xb1, 0x73, 0xd5, 0xd3, 0x8e, 0x44, 0xb2, 0x43, 0xc6,
+	0x25, 0x3c, 0x09, 0xe1, 0x4b, 0x29, 0x83, 0x83, 0x26, 0xfa, 0x45, 0x28, 0xf2, 0x97, 0x89, 0x20,
+	0xe7, 0xca, 0x5c, 0xa7, 0xb3, 0x20, 0x1e, 0x17, 0x45, 0xbe, 0xf5, 0xbf, 0x09, 0x58, 0xd9, 0xd1,
+	0x27, 0x7b, 0x44, 0x86, 0x0d, 0x62, 0x62, 0x62, 0x50, 0xd7, 0x44, 0xdd, 0x68, 0xb8, 0xb9, 0xe2,
+	0xad, 0x32, 0x4e, 0x38, 0x3e, 0xea, 0x04, 0x05, 0x58, 0x32, 0x52, 0x80, 0xad, 0x40, 0xc6, 0xa1,
+	0x8e, 0x41, 0x64, 0x2c, 0x12, 0x0d, 0xd5, 0x8a, 0x86, 0x9a, 0x52, 0xf8, 0x8c, 0xc8, 0x1f, 0x01,
+	0xdb, 0xd4, 0x0f, 0x7b, 0x43, 0x9f, 0x40, 0xa9, 0xd7, 0xac, 0xe3, 0x66, 0xbf, 0xd6, 0xf9, 0xae,
+	0xd6, 0xab, 0x6e, 0xf7, 0xaa, 0x8f, 0x1f, 0x6a, 0xdd, 0xce, 0xf6, 0xf7, 0x1e, 0x3d, 0x79, 0xf8,
+	0x0d, 0x25, 0x51, 0x2a, 0x9f, 0x9c, 0x96, 0x6f, 0xb7, 0xab, 0xf5, 0x6d, 0xb1, 0x63, 0xf6, 0xe8,
+	0xcb, 0x9e, 0x6e, 0x7b, 0xfa, 0xe3, 0x87, 0x5d, 0x6a, 0x4f, 0x18, 0x86, 0x2d, 0xeb, 0x62, 0xf4,
+	0xbc, 0x8a, 0x1e, 0xc3, 0x89, 0x4b, 0x8f, 0xe1, 0xe9, 0x69, 0x9e, 0xbc, 0xe4, 0x34, 0xdf, 0x80,
+	0x15, 0xc3, 0xa5, 0x9e, 0xa7, 0xb1, 0xec, 0x9f, 0x98, 0x33, 0xf5, 0xc5, 0x97, 0xce, 0xcf, 0xd6,
+	0x97, 0xeb, 0x8c, 0xdf, 0xe3, 0x6c, 0xa9, 0x7e, 0xd9, 0x88, 0x90, 0x78, 0x4f, 0xea, 0x1f, 0xa5,
+	0x58, 0x22, 0x65, 0x1d, 0x59, 0x36, 0x19, 0x10, 0x0f, 0x3d, 0x87, 0x25, 0xc3, 0x25, 0x26, 0x4b,
+	0xeb, 0x75, 0x3b, 0xfa, 0x11, 0xed, 0x2f, 0xc4, 0xe6, 0x34, 0xa1, 0x60, 0xa5, 0x1e, 0x4a, 0xf5,
+	0x46, 0xc4, 0xc0, 0x8b, 0xc6, 0x85, 0x36, 0xfa, 0x14, 0x96, 0x3c, 0x62, 0x5b, 0xce, 0xf8, 0xa5,
+	0x66, 0x50, 0xc7, 0x27, 0x2f, 0x83, 0x17, 0xb1, 0xeb, 0xf4, 0xf6, 0x9a, 0xdb, 0x4c, 0xaa, 0x2e,
+	0x84, 0x6a, 0xe8, 0xfc, 0x6c, 0x7d, 0xf1, 0x22, 0x0d, 0x2f, 0x4a, 0xcd, 0xb2, 0x5d, 0x6a, 0xc3,
+	0xe2, 0x45, 0x6b, 0xd0, 0x8a, 0xdc, 0xfb, 0x3c, 0x84, 0x04, 0x7b, 0x1b, 0xdd, 0x86, 0x9c, 0x4b,
+	0x06, 0x96, 0xe7, 0xbb, 0xc2, 0xcd, 0x8c, 0x13, 0x52, 0xd8, 0xce, 0x17, 0x5f, 0x40, 0x95, 0x7e,
+	0x1d, 0x66, 0x7a, 0x64, 0x9b, 0xc5, 0xb4, 0x3c, 0x7d, 0x4f, 0xaa, 0xcc, 0xe1, 0xa0, 0xc9, 0xd6,
+	0xe0, 0xd8, 0x0b, 0x13, 0x35, 0xfe, 0x9b, 0xd1, 0x78, 0x46, 0x21, 0xbf, 0x07, 0xe3, 0x39, 0x43,
+	0xf0, 0x61, 0x69, 0x3a, 0xf2, 0x61, 0xe9, 0x0a, 0x64, 0x6c, 0x72, 0x44, 0x6c, 0x71, 0x96, 0x63,
+	0xd1, 0xb8, 0xf7, 0x10, 0x8a, 0xc1, 0x17, 0x8c, 0xfc, 0xcb, 0x89, 0x1c, 0xa4, 0xfb, 0xd5, 0xde,
+	0x33, 0x65, 0x0e, 0x01, 0x64, 0xc5, 0xe2, 0x14, 0xaf, 0x75, 0xf5, 0x4e, 0x7b, 0xa3, 0xb5, 0xa9,
+	0x24, 0xef, 0xfd, 0x2c, 0x05, 0xf9, 0xf0, 0xbd, 0x88, 0x9d, 0x1d, 0xed, 0xe6, 0x8b, 0x60, 0x75,
+	0x87, 0xf4, 0x36, 0x39, 0x46, 0x6f, 0x4f, 0x6f, 0xa1, 0x3e, 0x11, 0x0f, 0xe4, 0x21, 0x3b, 0xb8,
+	0x81, 0x7a, 0x17, 0x72, 0xd5, 0x5e, 0xaf, 0xb5, 0xd9, 0x6e, 0x36, 0x94, 0xcf, 0x12, 0xa5, 0x2f,
+	0x9d, 0x9c, 0x96, 0x97, 0x43, 0x50, 0xd5, 0x13, 0x8b, 0x8f, 0xa3, 0xea, 0xf5, 0x66, 0xb7, 0xdf,
+	0x6c, 0x28, 0xaf, 0x92, 0xb3, 0x28, 0x7e, 0xab, 0xc2, 0x3f, 0xdd, 0xc9, 0x77, 0x71, 0xb3, 0x5b,
+	0xc5, 0xac, 0xc3, 0xcf, 0x92, 0xe2, 0x72, 0x6c, 0xda, 0xa3, 0x4b, 0x46, 0xba, 0xcb, 0xfa, 0x5c,
+	0x0b, 0xbe, 0x85, 0x7b, 0x95, 0x12, 0x9f, 0x77, 0x4c, 0x1f, 0xbf, 0x88, 0x6e, 0x4e, 0x58, 0x6f,
+	0xfc, 0xd5, 0x91, 0xab, 0x49, 0xcd, 0xf4, 0xd6, 0x63, 0xb1, 0x87, 0x69, 0x51, 0x61, 0x1e, 0xef,
+	0xb6, 0xdb, 0x0c, 0xf4, 0x2a, 0x3d, 0x33, 0x3a, 0x3c, 0x76, 0x58, 0xc5, 0x8c, 0xee, 0x42, 0x2e,
+	0x78, 0x94, 0x54, 0x3e, 0x4b, 0xcf, 0x18, 0x54, 0x0f, 0x5e, 0x54, 0x79, 0x87, 0x5b, 0xbb, 0x7d,
+	0xfe, 0xa9, 0xde, 0xab, 0xcc, 0x6c, 0x87, 0x07, 0x63, 0xdf, 0xa4, 0xc7, 0x0e, 0xdb, 0xb3, 0xf2,
+	0x1e, 0xee, 0xb3, 0x8c, 0xb8, 0xb4, 0x08, 0x31, 0xf2, 0x12, 0xee, 0x5d, 0xc8, 0xe1, 0xe6, 0x77,
+	0xc4, 0x57, 0x7d, 0xaf, 0xb2, 0x33, 0x7a, 0x30, 0xf9, 0x94, 0x18, 0xb2, 0xb7, 0x0e, 0xee, 0x6e,
+	0x55, 0xb9, 0xcb, 0x67, 0x51, 0x1d, 0x77, 0x74, 0xa0, 0x3b, 0xc4, 0x9c, 0x7e, 0xe3, 0x12, 0xb2,
+	0xee, 0xfd, 0x0a, 0xe4, 0x82, 0xcc, 0x14, 0xad, 0x41, 0xf6, 0x45, 0x07, 0x3f, 0x6b, 0x62, 0x65,
+	0x4e, 0xf8, 0x30, 0xe0, 0xbc, 0x10, 0x35, 0x45, 0x19, 0xe6, 0x77, 0xaa, 0xed, 0xea, 0x66, 0x13,
+	0x07, 0x57, 0xe4, 0x01, 0x40, 0xa6, 0x57, 0x25, 0x45, 0x76, 0x10, 0xea, 0xac, 0xad, 0xfe, 0xf0,
+	0x27, 0x6b, 0x73, 0x3f, 0xfe, 0xc9, 0xda, 0xdc, 0xab, 0xf3, 0xb5, 0xc4, 0x0f, 0xcf, 0xd7, 0x12,
+	0xff, 0x70, 0xbe, 0x96, 0xf8, 0xf7, 0xf3, 0xb5, 0xc4, 0x5e, 0x96, 0x1f, 0x02, 0x4f, 0xfe, 0x2f,
+	0x00, 0x00, 0xff, 0xff, 0x4b, 0xdb, 0xdc, 0xec, 0xf0, 0x31, 0x00, 0x00,
 }
diff --git a/vendor/github.com/docker/swarmkit/api/types.proto b/vendor/github.com/docker/swarmkit/api/types.proto
index eaf037e..635d12b 100644
--- a/vendor/github.com/docker/swarmkit/api/types.proto
+++ b/vendor/github.com/docker/swarmkit/api/types.proto
@@ -214,6 +214,18 @@
 	// ReadOnly should be set to true if the mount should not be writable.
 	bool readonly = 4 [(gogoproto.customname) = "ReadOnly"];
 
+	// Consistency indicates the tolerable level of file system consistency
+	enum Consistency {
+		option (gogoproto.goproto_enum_prefix) = false;
+		option (gogoproto.enum_customname) = "MountConsistency";
+
+		DEFAULT = 0 [(gogoproto.enumvalue_customname) = "MountConsistencyDefault"];
+		CONSISTENT = 1 [(gogoproto.enumvalue_customname) = "MountConsistencyFull"];
+		CACHED = 2 [(gogoproto.enumvalue_customname) = "MountConsistencyCached"];
+		DELEGATED = 3 [(gogoproto.enumvalue_customname) = "MountConsistencyDelegated"];
+	}
+	Consistency consistency = 8;
+
 	// BindOptions specifies options that are specific to a bind mount.
 	message BindOptions {
 		enum Propagation {
@@ -497,12 +509,17 @@
 	// field.
 	string message = 3;
 
-	// Err is set if the task is in an error state.
+	// Err is set if the task is in an error state, or is unable to
+	// progress from an earlier state because a precondition is
+	// unsatisfied.
 	//
 	// The following states should report a companion error:
 	//
 	//	FAILED, REJECTED
 	//
+	// In general, messages that should be surfaced to users belong in the
+	// Err field, and notes on routine state transitions belong in Message.
+	//
 	// TODO(stevvooe) Integrate this field with the error interface.
 	string err = 4;
 
diff --git a/vendor/github.com/docker/swarmkit/ca/keyreadwriter.go b/vendor/github.com/docker/swarmkit/ca/keyreadwriter.go
index 964cb3e..4a7c0a0 100644
--- a/vendor/github.com/docker/swarmkit/ca/keyreadwriter.go
+++ b/vendor/github.com/docker/swarmkit/ca/keyreadwriter.go
@@ -187,10 +187,7 @@
 		return err
 	}
 
-	if err := k.writeKey(keyBlock, updatedKEK, updatedHeaderObj); err != nil {
-		return err
-	}
-	return nil
+	return k.writeKey(keyBlock, updatedKEK, updatedHeaderObj)
 }
 
 // ViewAndUpdateHeaders updates the header manager, and updates any headers on the existing key
diff --git a/vendor/github.com/docker/swarmkit/connectionbroker/broker.go b/vendor/github.com/docker/swarmkit/connectionbroker/broker.go
index a0ba7cf..43b384a 100644
--- a/vendor/github.com/docker/swarmkit/connectionbroker/broker.go
+++ b/vendor/github.com/docker/swarmkit/connectionbroker/broker.go
@@ -4,7 +4,9 @@
 package connectionbroker
 
 import (
+	"net"
 	"sync"
+	"time"
 
 	"github.com/docker/swarmkit/api"
 	"github.com/docker/swarmkit/remotes"
@@ -60,9 +62,14 @@
 		return nil, err
 	}
 
+	// gRPC dialer connects to proxy first. Provide a custom dialer here to avoid that.
+	// TODO(anshul) Add an option to configure this.
 	dialOpts = append(dialOpts,
 		grpc.WithUnaryInterceptor(grpc_prometheus.UnaryClientInterceptor),
-		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor))
+		grpc.WithStreamInterceptor(grpc_prometheus.StreamClientInterceptor),
+		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+			return net.DialTimeout("tcp", addr, timeout)
+		}))
 
 	cc, err := grpc.Dial(peer.Addr, dialOpts...)
 	if err != nil {
diff --git a/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go b/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go
index 2cfc951..53f9ffb 100644
--- a/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go
+++ b/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/networkallocator.go
@@ -48,8 +48,10 @@
 	tasks map[string]struct{}
 
 	// Allocator state to indicate if allocation has been
-	// successfully completed for this node.
-	nodes map[string]struct{}
+	// successfully completed for this node on this network.
+	// outer map key: node id
+	// inner map key: network id
+	nodes map[string]map[string]struct{}
 }
 
 // Local in-memory state related to network that need to be tracked by cnmNetworkAllocator
@@ -89,7 +91,7 @@
 		networks: make(map[string]*network),
 		services: make(map[string]struct{}),
 		tasks:    make(map[string]struct{}),
-		nodes:    make(map[string]struct{}),
+		nodes:    make(map[string]map[string]struct{}),
 	}
 
 	// There are no driver configurations and notification
@@ -430,56 +432,6 @@
 	return true
 }
 
-// IsNodeAllocated returns if the passed node has its network resources allocated or not.
-func (na *cnmNetworkAllocator) IsNodeAllocated(node *api.Node) bool {
-	// If the node is not found in the allocated set, then it is
-	// not allocated.
-	if _, ok := na.nodes[node.ID]; !ok {
-		return false
-	}
-
-	// If no attachment, not allocated.
-	if node.Attachment == nil {
-		return false
-	}
-
-	// If the network is not allocated, the node cannot be allocated.
-	localNet, ok := na.networks[node.Attachment.Network.ID]
-	if !ok {
-		return false
-	}
-
-	// Addresses empty, not allocated.
-	if len(node.Attachment.Addresses) == 0 {
-		return false
-	}
-
-	// The allocated IP address not found in local endpoint state. Not allocated.
-	if _, ok := localNet.endpoints[node.Attachment.Addresses[0]]; !ok {
-		return false
-	}
-
-	return true
-}
-
-// AllocateNode allocates the IP addresses for the network to which
-// the node is attached.
-func (na *cnmNetworkAllocator) AllocateNode(node *api.Node) error {
-	if err := na.allocateNetworkIPs(node.Attachment); err != nil {
-		return err
-	}
-
-	na.nodes[node.ID] = struct{}{}
-	return nil
-}
-
-// DeallocateNode deallocates the IP addresses for the network to
-// which the node is attached.
-func (na *cnmNetworkAllocator) DeallocateNode(node *api.Node) error {
-	delete(na.nodes, node.ID)
-	return na.releaseEndpoints([]*api.NetworkAttachment{node.Attachment})
-}
-
 // AllocateTask allocates all the endpoint resources for all the
 // networks that a task is attached to.
 func (na *cnmNetworkAllocator) AllocateTask(t *api.Task) error {
@@ -489,7 +441,7 @@
 		}
 		if err := na.allocateNetworkIPs(nAttach); err != nil {
 			if err := na.releaseEndpoints(t.Networks[:i]); err != nil {
-				log.G(context.TODO()).WithError(err).Errorf("Failed to release IP addresses while rolling back allocation for task %s network %s", t.ID, nAttach.Network.ID)
+				log.G(context.TODO()).WithError(err).Errorf("failed to release IP addresses while rolling back allocation for task %s network %s", t.ID, nAttach.Network.ID)
 			}
 			return errors.Wrapf(err, "failed to allocate network IP for task %s network %s", t.ID, nAttach.Network.ID)
 		}
@@ -507,6 +459,75 @@
 	return na.releaseEndpoints(t.Networks)
 }
 
+// IsAttachmentAllocated returns if the passed node and network has resources allocated or not.
+func (na *cnmNetworkAllocator) IsAttachmentAllocated(node *api.Node, networkAttachment *api.NetworkAttachment) bool {
+	if node == nil {
+		return false
+	}
+
+	if networkAttachment == nil {
+		return false
+	}
+
+	// If the node is not found in the allocated set, then it is
+	// not allocated.
+	if _, ok := na.nodes[node.ID]; !ok {
+		return false
+	}
+
+	// If the network is not found in the allocated set, then it is
+	// not allocated.
+	if _, ok := na.nodes[node.ID][networkAttachment.Network.ID]; !ok {
+		return false
+	}
+
+	// If the network is not allocated, the node cannot be allocated.
+	localNet, ok := na.networks[networkAttachment.Network.ID]
+	if !ok {
+		return false
+	}
+
+	// Addresses empty, not allocated.
+	if len(networkAttachment.Addresses) == 0 {
+		return false
+	}
+
+	// The allocated IP address not found in local endpoint state. Not allocated.
+	if _, ok := localNet.endpoints[networkAttachment.Addresses[0]]; !ok {
+		return false
+	}
+
+	return true
+}
+
+// AllocateAttachment allocates the IP addresses for a LB in a network
+// on a given node
+func (na *cnmNetworkAllocator) AllocateAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error {
+
+	if err := na.allocateNetworkIPs(networkAttachment); err != nil {
+		return err
+	}
+
+	if na.nodes[node.ID] == nil {
+		na.nodes[node.ID] = make(map[string]struct{})
+	}
+	na.nodes[node.ID][networkAttachment.Network.ID] = struct{}{}
+
+	return nil
+}
+
+// DeallocateAttachment deallocates the IP addresses for a LB in a network to
+// which the node is attached.
+func (na *cnmNetworkAllocator) DeallocateAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error {
+
+	delete(na.nodes[node.ID], networkAttachment.Network.ID)
+	if len(na.nodes[node.ID]) == 0 {
+		delete(na.nodes, node.ID)
+	}
+
+	return na.releaseEndpoints([]*api.NetworkAttachment{networkAttachment})
+}
+
 func (na *cnmNetworkAllocator) releaseEndpoints(networks []*api.NetworkAttachment) error {
 	for _, nAttach := range networks {
 		localNet := na.getNetwork(nAttach.Network.ID)
@@ -553,6 +574,7 @@
 
 // allocate virtual IP for a single endpoint attachment of the service.
 func (na *cnmNetworkAllocator) allocateVIP(vip *api.Endpoint_VirtualIP) error {
+	var opts map[string]string
 	localNet := na.getNetwork(vip.NetworkID)
 	if localNet == nil {
 		return errors.New("networkallocator: could not find local network state")
@@ -582,9 +604,13 @@
 			return err
 		}
 	}
+	if localNet.nw.IPAM != nil && localNet.nw.IPAM.Driver != nil {
+		// set ipam allocation method to serial
+		opts = setIPAMSerialAlloc(localNet.nw.IPAM.Driver.Options)
+	}
 
 	for _, poolID := range localNet.pools {
-		ip, _, err := ipam.RequestAddress(poolID, addr, nil)
+		ip, _, err := ipam.RequestAddress(poolID, addr, opts)
 		if err != nil && err != ipamapi.ErrNoAvailableIPs && err != ipamapi.ErrIPOutOfRange {
 			return errors.Wrap(err, "could not allocate VIP from IPAM")
 		}
@@ -636,6 +662,7 @@
 // allocate the IP addresses for a single network attachment of the task.
 func (na *cnmNetworkAllocator) allocateNetworkIPs(nAttach *api.NetworkAttachment) error {
 	var ip *net.IPNet
+	var opts map[string]string
 
 	ipam, _, _, err := na.resolveIPAM(nAttach.Network)
 	if err != nil {
@@ -665,11 +692,16 @@
 				}
 			}
 		}
+		// Set the ipam options if the network has an ipam driver.
+		if localNet.nw.IPAM != nil && localNet.nw.IPAM.Driver != nil {
+			// set ipam allocation method to serial
+			opts = setIPAMSerialAlloc(localNet.nw.IPAM.Driver.Options)
+		}
 
 		for _, poolID := range localNet.pools {
 			var err error
 
-			ip, _, err = ipam.RequestAddress(poolID, addr, nil)
+			ip, _, err = ipam.RequestAddress(poolID, addr, opts)
 			if err != nil && err != ipamapi.ErrNoAvailableIPs && err != ipamapi.ErrIPOutOfRange {
 				return errors.Wrap(err, "could not allocate IP from IPAM")
 			}
@@ -897,8 +929,16 @@
 			}
 			gwIP.IP = ip
 		}
+		if dOptions == nil {
+			dOptions = make(map[string]string)
+		}
+		dOptions[ipamapi.RequestAddressType] = netlabel.Gateway
+		// set ipam allocation method to serial
+		dOptions = setIPAMSerialAlloc(dOptions)
+		defer delete(dOptions, ipamapi.RequestAddressType)
+
 		if ic.Gateway != "" || gwIP == nil {
-			gwIP, _, err = ipam.RequestAddress(poolID, net.ParseIP(ic.Gateway), map[string]string{ipamapi.RequestAddressType: netlabel.Gateway})
+			gwIP, _, err = ipam.RequestAddress(poolID, net.ParseIP(ic.Gateway), dOptions)
 			if err != nil {
 				// Rollback by releasing all the resources allocated so far.
 				releasePools(ipam, ipamConfigs[:i], pools)
@@ -959,3 +999,14 @@
 	}
 	return false
 }
+
+// setIPAMSerialAlloc sets the ipam allocation method to serial
+func setIPAMSerialAlloc(opts map[string]string) map[string]string {
+	if opts == nil {
+		opts = make(map[string]string)
+	}
+	if _, ok := opts[ipamapi.AllocSerialPrefix]; !ok {
+		opts[ipamapi.AllocSerialPrefix] = "true"
+	}
+	return opts
+}
diff --git a/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go b/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go
index b09ac47..19dcbec 100644
--- a/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go
+++ b/vendor/github.com/docker/swarmkit/manager/allocator/cnmallocator/portallocator.go
@@ -382,7 +382,7 @@
 	}
 
 	// Check out an arbitrary port from dynamic port space.
-	swarmPort, err := ps.dynamicPortSpace.GetID()
+	swarmPort, err := ps.dynamicPortSpace.GetID(true)
 	if err != nil {
 		return
 	}
diff --git a/vendor/github.com/docker/swarmkit/manager/allocator/network.go b/vendor/github.com/docker/swarmkit/manager/allocator/network.go
index c760ad5..ac798c9 100644
--- a/vendor/github.com/docker/swarmkit/manager/allocator/network.go
+++ b/vendor/github.com/docker/swarmkit/manager/allocator/network.go
@@ -154,11 +154,10 @@
 
 	// First, allocate objects that already have addresses associated with
 	// them, to reserve these IP addresses in internal state.
-	if nc.ingressNetwork != nil {
-		if err := a.allocateNodes(ctx, true); err != nil {
-			return err
-		}
+	if err := a.allocateNodes(ctx, true); err != nil {
+		return err
 	}
+
 	if err := a.allocateServices(ctx, true); err != nil {
 		return err
 	}
@@ -166,20 +165,14 @@
 		return err
 	}
 
-	// Now allocate objects that don't have addresses yet.
-	if nc.ingressNetwork != nil {
-		if err := a.allocateNodes(ctx, false); err != nil {
-			return err
-		}
-	}
-	if err := a.allocateServices(ctx, false); err != nil {
-		return err
-	}
-	if err := a.allocateTasks(ctx, false); err != nil {
+	if err := a.allocateNodes(ctx, false); err != nil {
 		return err
 	}
 
-	return nil
+	if err := a.allocateServices(ctx, false); err != nil {
+		return err
+	}
+	return a.allocateTasks(ctx, false)
 }
 
 func (a *Allocator) doNetworkAlloc(ctx context.Context, ev events.Event) {
@@ -208,22 +201,22 @@
 		}); err != nil {
 			log.G(ctx).WithError(err).Errorf("Failed to commit allocation for network %s", n.ID)
 		}
-
 		if IsIngressNetwork(n) {
 			nc.ingressNetwork = n
-			err := a.allocateNodes(ctx, false)
-			if err != nil {
-				log.G(ctx).WithError(err).Error(err)
-			}
+		}
+		err := a.allocateNodes(ctx, false)
+		if err != nil {
+			log.G(ctx).WithError(err).Error(err)
 		}
 	case api.EventDeleteNetwork:
 		n := v.Network.Copy()
 
 		if IsIngressNetwork(n) && nc.ingressNetwork != nil && nc.ingressNetwork.ID == n.ID {
 			nc.ingressNetwork = nil
-			if err := a.deallocateNodes(ctx); err != nil {
-				log.G(ctx).WithError(err).Error(err)
-			}
+		}
+
+		if err := a.deallocateNodeAttachments(ctx, n.ID); err != nil {
+			log.G(ctx).WithError(err).Error(err)
 		}
 
 		// The assumption here is that all dependent objects
@@ -361,33 +354,67 @@
 	nc := a.netCtx
 
 	if isDelete {
-		if nc.nwkAllocator.IsNodeAllocated(node) {
-			if err := nc.nwkAllocator.DeallocateNode(node); err != nil {
-				log.G(ctx).WithError(err).Errorf("Failed freeing network resources for node %s", node.ID)
-			} else {
-				nc.somethingWasDeallocated = true
+		if err := a.deallocateNode(node); err != nil {
+			log.G(ctx).WithError(err).Errorf("Failed freeing network resources for node %s", node.ID)
+		} else {
+			nc.somethingWasDeallocated = true
+		}
+	} else {
+		allocatedNetworks, err := a.getAllocatedNetworks()
+		if err != nil {
+			log.G(ctx).WithError(err).Errorf("Error listing allocated networks in network %s", node.ID)
+		}
+
+		isAllocated := a.allocateNode(ctx, node, false, allocatedNetworks)
+
+		if isAllocated {
+			if err := a.store.Batch(func(batch *store.Batch) error {
+				return a.commitAllocatedNode(ctx, batch, node)
+			}); err != nil {
+				log.G(ctx).WithError(err).Errorf("Failed to commit allocation of network resources for node %s", node.ID)
 			}
 		}
-		return
+	}
+}
+
+func isOverlayNetwork(n *api.Network) bool {
+	if n.DriverState != nil && n.DriverState.Name == "overlay" {
+		return true
 	}
 
-	if !nc.nwkAllocator.IsNodeAllocated(node) && nc.ingressNetwork != nil {
-		if node.Attachment == nil {
-			node.Attachment = &api.NetworkAttachment{}
-		}
+	if n.Spec.DriverConfig != nil && n.Spec.DriverConfig.Name == "overlay" {
+		return true
+	}
 
-		node.Attachment.Network = nc.ingressNetwork.Copy()
-		if err := a.allocateNode(ctx, node); err != nil {
-			log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s", node.ID)
-			return
-		}
+	return false
+}
 
-		if err := a.store.Batch(func(batch *store.Batch) error {
-			return a.commitAllocatedNode(ctx, batch, node)
-		}); err != nil {
-			log.G(ctx).WithError(err).Errorf("Failed to commit allocation of network resources for node %s", node.ID)
+func (a *Allocator) getAllocatedNetworks() ([]*api.Network, error) {
+	var (
+		err               error
+		nc                = a.netCtx
+		na                = nc.nwkAllocator
+		allocatedNetworks []*api.Network
+	)
+
+	// Find allocated networks
+	var networks []*api.Network
+	a.store.View(func(tx store.ReadTx) {
+		networks, err = store.FindNetworks(tx, store.All)
+	})
+
+	if err != nil {
+		return nil, errors.Wrap(err, "error listing all networks in store while trying to allocate during init")
+	}
+
+	for _, n := range networks {
+
+		if isOverlayNetwork(n) && na.IsAllocated(n) {
+			allocatedNetworks = append(allocatedNetworks, n)
 		}
 	}
+
+	return allocatedNetworks, nil
 }
 
 func (a *Allocator) allocateNodes(ctx context.Context, existingAddressesOnly bool) error {
@@ -396,7 +423,6 @@
 		allocatedNodes []*api.Node
 		nodes          []*api.Node
 		err            error
-		nc             = a.netCtx
 	)
 
 	a.store.View(func(tx store.ReadTx) {
@@ -406,26 +432,16 @@
 		return errors.Wrap(err, "error listing all nodes in store while trying to allocate network resources")
 	}
 
+	allocatedNetworks, err := a.getAllocatedNetworks()
+	if err != nil {
+		return errors.Wrap(err, "error listing all nodes in store while trying to allocate network resources")
+	}
+
 	for _, node := range nodes {
-		if nc.nwkAllocator.IsNodeAllocated(node) {
-			continue
+		isAllocated := a.allocateNode(ctx, node, existingAddressesOnly, allocatedNetworks)
+		if isAllocated {
+			allocatedNodes = append(allocatedNodes, node)
 		}
-
-		if node.Attachment == nil {
-			node.Attachment = &api.NetworkAttachment{}
-		}
-
-		if existingAddressesOnly && len(node.Attachment.Addresses) == 0 {
-			continue
-		}
-
-		node.Attachment.Network = nc.ingressNetwork.Copy()
-		if err := a.allocateNode(ctx, node); err != nil {
-			log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s", node.ID)
-			continue
-		}
-
-		allocatedNodes = append(allocatedNodes, node)
 	}
 
 	if err := a.store.Batch(func(batch *store.Batch) error {
@@ -457,21 +473,90 @@
 	}
 
 	for _, node := range nodes {
-		if nc.nwkAllocator.IsNodeAllocated(node) {
-			if err := nc.nwkAllocator.DeallocateNode(node); err != nil {
-				log.G(ctx).WithError(err).Errorf("Failed freeing network resources for node %s", node.ID)
-			} else {
-				nc.somethingWasDeallocated = true
+		if err := a.deallocateNode(node); err != nil {
+			log.G(ctx).WithError(err).Errorf("Failed freeing network resources for node %s", node.ID)
+		} else {
+			nc.somethingWasDeallocated = true
+		}
+		if err := a.store.Batch(func(batch *store.Batch) error {
+			return a.commitAllocatedNode(ctx, batch, node)
+		}); err != nil {
+			log.G(ctx).WithError(err).Errorf("Failed to commit deallocation of network resources for node %s", node.ID)
+		}
+	}
+
+	return nil
+}
+
+func (a *Allocator) deallocateNodeAttachments(ctx context.Context, nid string) error {
+	var (
+		nodes []*api.Node
+		nc    = a.netCtx
+		err   error
+	)
+
+	a.store.View(func(tx store.ReadTx) {
+		nodes, err = store.FindNodes(tx, store.All)
+	})
+	if err != nil {
+		return fmt.Errorf("error listing all nodes in store while trying to free network resources")
+	}
+
+	for _, node := range nodes {
+
+		var networkAttachment *api.NetworkAttachment
+		var naIndex int
+		for index, na := range node.Attachments {
+			if na.Network.ID == nid {
+				networkAttachment = na
+				naIndex = index
+				break
 			}
-			node.Attachment = nil
-			if err := a.store.Batch(func(batch *store.Batch) error {
-				return a.commitAllocatedNode(ctx, batch, node)
-			}); err != nil {
+		}
+
+		if networkAttachment == nil {
+			log.G(ctx).Errorf("Failed to find network %s on node %s", nid, node.ID)
+			continue
+		}
+
+		if nc.nwkAllocator.IsAttachmentAllocated(node, networkAttachment) {
+			if err := nc.nwkAllocator.DeallocateAttachment(node, networkAttachment); err != nil {
 				log.G(ctx).WithError(err).Errorf("Failed to commit deallocation of network resources for node %s", node.ID)
+			} else {
+
+				// Delete the lbattachment
+				node.Attachments[naIndex] = node.Attachments[len(node.Attachments)-1]
+				node.Attachments[len(node.Attachments)-1] = nil
+				node.Attachments = node.Attachments[:len(node.Attachments)-1]
+
+				if err := a.store.Batch(func(batch *store.Batch) error {
+					return a.commitAllocatedNode(ctx, batch, node)
+				}); err != nil {
+					log.G(ctx).WithError(err).Errorf("Failed to commit deallocation of network resources for node %s", node.ID)
+				}
+
+			}
+		}
+
+	}
+	return nil
+}
+
+func (a *Allocator) deallocateNode(node *api.Node) error {
+	var (
+		nc = a.netCtx
+	)
+
+	for _, na := range node.Attachments {
+		if nc.nwkAllocator.IsAttachmentAllocated(node, na) {
+			if err := nc.nwkAllocator.DeallocateAttachment(node, na); err != nil {
+				return err
 			}
 		}
 	}
 
+	node.Attachments = nil
+
 	return nil
 }
 
@@ -758,8 +843,48 @@
 	nc.pendingTasks[t.ID] = t
 }
 
-func (a *Allocator) allocateNode(ctx context.Context, node *api.Node) error {
-	return a.netCtx.nwkAllocator.AllocateNode(node)
+func (a *Allocator) allocateNode(ctx context.Context, node *api.Node, existingAddressesOnly bool, networks []*api.Network) bool {
+	var allocated bool
+
+	nc := a.netCtx
+
+	for _, network := range networks {
+
+		var lbAttachment *api.NetworkAttachment
+		for _, na := range node.Attachments {
+			if na.Network != nil && na.Network.ID == network.ID {
+				lbAttachment = na
+				break
+			}
+		}
+
+		if lbAttachment != nil {
+			if nc.nwkAllocator.IsAttachmentAllocated(node, lbAttachment) {
+				continue
+			}
+		}
+
+		if lbAttachment == nil {
+			lbAttachment = &api.NetworkAttachment{}
+			node.Attachments = append(node.Attachments, lbAttachment)
+		}
+
+		if existingAddressesOnly && len(lbAttachment.Addresses) == 0 {
+			continue
+		}
+
+		lbAttachment.Network = network.Copy()
+		if err := a.netCtx.nwkAllocator.AllocateAttachment(node, lbAttachment); err != nil {
+			log.G(ctx).WithError(err).Errorf("Failed to allocate network resources for node %s", node.ID)
+			// TODO: Should we add a unallocatedNode and retry allocating resources like we do for network, tasks, services?
+			// right now, we will only retry allocating network resources for the node when the node is updated.
+			continue
+		}
+
+		allocated = true
+	}
+	return allocated
+
 }
 
 func (a *Allocator) commitAllocatedNode(ctx context.Context, batch *store.Batch, node *api.Node) error {
@@ -768,13 +893,13 @@
 
 		if err == store.ErrSequenceConflict {
 			storeNode := store.GetNode(tx, node.ID)
-			storeNode.Attachment = node.Attachment.Copy()
+			storeNode.Attachments = node.Attachments
 			err = store.UpdateNode(tx, storeNode)
 		}
 
 		return errors.Wrapf(err, "failed updating state in store transaction for node %s", node.ID)
 	}); err != nil {
-		if err := a.netCtx.nwkAllocator.DeallocateNode(node); err != nil {
+		if err := a.deallocateNode(node); err != nil {
 			log.G(ctx).WithError(err).Errorf("failed rolling back allocation of node %s", node.ID)
 		}
 
@@ -1159,9 +1284,11 @@
 
 // updateTaskStatus sets TaskStatus and updates timestamp.
 func updateTaskStatus(t *api.Task, newStatus api.TaskState, message string) {
-	t.Status.State = newStatus
-	t.Status.Message = message
-	t.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
+	t.Status = api.TaskStatus{
+		State:     newStatus,
+		Message:   message,
+		Timestamp: ptypes.MustTimestampProto(time.Now()),
+	}
 }
 
 // IsIngressNetwork returns whether the passed network is an ingress network.
diff --git a/vendor/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go b/vendor/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go
index 04dd168..f6b69b4 100644
--- a/vendor/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go
+++ b/vendor/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go
@@ -66,22 +66,6 @@
 	HostPublishPortsNeedUpdate(s *api.Service) bool
 
 	//
-	// Node Allocation
-	//
-
-	// IsNodeAllocated returns if the passed node has its network
-	// resources allocated or not.
-	IsNodeAllocated(node *api.Node) bool
-
-	// AllocateNode allocates the IP addresses for the network to which
-	// the node is attached.
-	AllocateNode(node *api.Node) error
-
-	// DeallocateNode deallocates the IP addresses for the network to
-	// which the node is attached.
-	DeallocateNode(node *api.Node) error
-
-	//
 	// Task Allocation
 	//
 
@@ -96,6 +80,15 @@
 	// DeallocateTask releases all the endpoint resources for all the
 	// networks that a task is attached to.
 	DeallocateTask(t *api.Task) error
+
+	// AllocateAttachment Allocates a load balancer endpoint for the node
+	AllocateAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error
+
+	// DeallocateAttachment Deallocates a load balancer endpoint for the node
+	DeallocateAttachment(node *api.Node, networkAttachment *api.NetworkAttachment) error
+
+	// IsAttachmentAllocated If lb endpoint is allocated on the node
+	IsAttachmentAllocated(node *api.Node, networkAttachment *api.NetworkAttachment) bool
 }
 
 // IsIngressNetwork check if the network is an ingress network
diff --git a/vendor/github.com/docker/swarmkit/manager/controlapi/network.go b/vendor/github.com/docker/swarmkit/manager/controlapi/network.go
index 708f36b..b150de0 100644
--- a/vendor/github.com/docker/swarmkit/manager/controlapi/network.go
+++ b/vendor/github.com/docker/swarmkit/manager/controlapi/network.go
@@ -96,11 +96,7 @@
 		return err
 	}
 
-	if err := validateIPAM(spec.IPAM, pg); err != nil {
-		return err
-	}
-
-	return nil
+	return validateIPAM(spec.IPAM, pg)
 }
 
 // CreateNetwork creates and returns a Network based on the provided NetworkSpec.
diff --git a/vendor/github.com/docker/swarmkit/manager/controlapi/node.go b/vendor/github.com/docker/swarmkit/manager/controlapi/node.go
index f3ee9e4..bac6b80 100644
--- a/vendor/github.com/docker/swarmkit/manager/controlapi/node.go
+++ b/vendor/github.com/docker/swarmkit/manager/controlapi/node.go
@@ -248,6 +248,29 @@
 	}, nil
 }
 
+func removeNodeAttachments(tx store.Tx, nodeID string) error {
+	// orphan the node's attached containers. if we don't do this, the
+	// network these attachments are connected to will never be removable
+	tasks, err := store.FindTasks(tx, store.ByNodeID(nodeID))
+	if err != nil {
+		return err
+	}
+	for _, task := range tasks {
+		// if the task is an attachment, then we just delete it. the allocator
+		// will do the heavy lifting. basically, GetAttachment will return the
+		// attachment if that's the kind of runtime, or nil if it's not.
+		if task.Spec.GetAttachment() != nil {
+			// don't delete the task. instead, update it to `ORPHANED` so that
+			// the taskreaper will clean it up.
+			task.Status.State = api.TaskStateOrphaned
+			if err := store.UpdateTask(tx, task); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
 // RemoveNode removes a Node referenced by NodeID with the given NodeSpec.
 // - Returns NotFound if the Node is not found.
 // - Returns FailedPrecondition if the Node has manager role (and is part of the memberlist) or is not shut down.
@@ -313,6 +336,10 @@
 			return err
 		}
 
+		if err := removeNodeAttachments(tx, request.NodeID); err != nil {
+			return err
+		}
+
 		return store.DeleteNode(tx, request.NodeID)
 	})
 	if err != nil {
diff --git a/vendor/github.com/docker/swarmkit/manager/controlapi/service.go b/vendor/github.com/docker/swarmkit/manager/controlapi/service.go
index 0a0a9ff..951c27b 100644
--- a/vendor/github.com/docker/swarmkit/manager/controlapi/service.go
+++ b/vendor/github.com/docker/swarmkit/manager/controlapi/service.go
@@ -56,10 +56,7 @@
 	if err := validateResources(r.Limits); err != nil {
 		return err
 	}
-	if err := validateResources(r.Reservations); err != nil {
-		return err
-	}
-	return nil
+	return validateResources(r.Reservations)
 }
 
 func validateRestartPolicy(rp *api.RestartPolicy) error {
@@ -161,11 +158,7 @@
 		return err
 	}
 
-	if err := validateHealthCheck(container.Healthcheck); err != nil {
-		return err
-	}
-
-	return nil
+	return validateHealthCheck(container.Healthcheck)
 }
 
 // validateImage validates image name in containerSpec
@@ -481,11 +474,7 @@
 	if err := validateEndpointSpec(spec.Endpoint); err != nil {
 		return err
 	}
-	if err := validateMode(spec); err != nil {
-		return err
-	}
-
-	return nil
+	return validateMode(spec)
 }
 
 // checkPortConflicts does a best effort to find if the passed in spec has port
diff --git a/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go b/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
index 8ad1df3..4de6a30 100644
--- a/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
+++ b/vendor/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
@@ -854,10 +854,7 @@
 		appliesTo = msg.ResultsIn
 		msg.Type = assignmentType
 
-		if err := stream.Send(&msg); err != nil {
-			return err
-		}
-		return nil
+		return stream.Send(&msg)
 	}
 
 	// TODO(aaronl): Also send node secrets that should be exposed to
diff --git a/vendor/github.com/docker/swarmkit/manager/manager.go b/vendor/github.com/docker/swarmkit/manager/manager.go
index d3e88fd..39db22b 100644
--- a/vendor/github.com/docker/swarmkit/manager/manager.go
+++ b/vendor/github.com/docker/swarmkit/manager/manager.go
@@ -54,6 +54,9 @@
 const (
 	// defaultTaskHistoryRetentionLimit is the number of tasks to keep.
 	defaultTaskHistoryRetentionLimit = 5
+
+	// Default value for grpc max message size.
+	grpcMaxMessageSize = 128 << 20
 )
 
 // RemoteAddrs provides a listening address and an optional advertise address
@@ -231,6 +234,7 @@
 		grpc.Creds(config.SecurityConfig.ServerTLSCreds),
 		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
 		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
+		grpc.MaxMsgSize(grpcMaxMessageSize),
 	}
 
 	m := &Manager{
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/constraintenforcer/constraint_enforcer.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/constraintenforcer/constraint_enforcer.go
index 2978898..7aa7651 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/constraintenforcer/constraint_enforcer.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/constraintenforcer/constraint_enforcer.go
@@ -159,7 +159,8 @@
 					// restarting the task on another node
 					// (if applicable).
 					t.Status.State = api.TaskStateRejected
-					t.Status.Message = "assigned node no longer meets constraints"
+					t.Status.Message = "task rejected by constraint enforcer"
+					t.Status.Err = "assigned node no longer meets constraints"
 					t.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
 					return store.UpdateTask(tx, t)
 				})
diff --git a/vendor/github.com/docker/swarmkit/manager/orchestrator/update/updater.go b/vendor/github.com/docker/swarmkit/manager/orchestrator/update/updater.go
index 0bda96e..f7e4ff5 100644
--- a/vendor/github.com/docker/swarmkit/manager/orchestrator/update/updater.go
+++ b/vendor/github.com/docker/swarmkit/manager/orchestrator/update/updater.go
@@ -384,10 +384,7 @@
 				return errors.New("service was deleted")
 			}
 
-			if err := store.CreateTask(tx, updated); err != nil {
-				return err
-			}
-			return nil
+			return store.CreateTask(tx, updated)
 		})
 		if err != nil {
 			return err
diff --git a/vendor/github.com/docker/swarmkit/manager/scheduler/filter.go b/vendor/github.com/docker/swarmkit/manager/scheduler/filter.go
index 36b601c..3b1c73f 100644
--- a/vendor/github.com/docker/swarmkit/manager/scheduler/filter.go
+++ b/vendor/github.com/docker/swarmkit/manager/scheduler/filter.go
@@ -169,7 +169,7 @@
 		}
 	}
 
-	if f.t.Spec.LogDriver != nil {
+	if f.t.Spec.LogDriver != nil && f.t.Spec.LogDriver.Name != "none" {
 		// If there are no log driver types in the list at all, most likely this is
 		// an older daemon that did not report this information. In this case don't filter
 		if typeFound, exists := f.pluginExistsOnNode("Log", f.t.Spec.LogDriver.Name, nodePlugins); !exists && typeFound {
diff --git a/vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go b/vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go
index 73349e3..6d5b4e5 100644
--- a/vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go
+++ b/vendor/github.com/docker/swarmkit/manager/scheduler/scheduler.go
@@ -92,11 +92,7 @@
 		tasksByNode[t.NodeID][t.ID] = t
 	}
 
-	if err := s.buildNodeSet(tx, tasksByNode); err != nil {
-		return err
-	}
-
-	return nil
+	return s.buildNodeSet(tx, tasksByNode)
 }
 
 // Run is the scheduler event loop.
@@ -450,7 +446,9 @@
 						continue
 					}
 
-					if t.Status.State == decision.new.Status.State && t.Status.Message == decision.new.Status.Message {
+					if t.Status.State == decision.new.Status.State &&
+						t.Status.Message == decision.new.Status.Message &&
+						t.Status.Err == decision.new.Status.Err {
 						// No changes, ignore
 						continue
 					}
@@ -506,7 +504,7 @@
 	if !s.pipeline.Process(&nodeInfo) {
 		// this node cannot accommodate this task
 		newT.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
-		newT.Status.Message = s.pipeline.Explain()
+		newT.Status.Err = s.pipeline.Explain()
 		s.allTasks[t.ID] = &newT
 
 		return &newT
@@ -706,9 +704,9 @@
 		newT := *t
 		newT.Status.Timestamp = ptypes.MustTimestampProto(time.Now())
 		if explanation != "" {
-			newT.Status.Message = "no suitable node (" + explanation + ")"
+			newT.Status.Err = "no suitable node (" + explanation + ")"
 		} else {
-			newT.Status.Message = "no suitable node"
+			newT.Status.Err = "no suitable node"
 		}
 		s.allTasks[t.ID] = &newT
 		schedulingDecisions[t.ID] = schedulingDecision{old: t, new: &newT}
diff --git a/vendor/github.com/docker/swarmkit/manager/state/raft/raft.go b/vendor/github.com/docker/swarmkit/manager/state/raft/raft.go
index cfbec99..28c7cfa 100644
--- a/vendor/github.com/docker/swarmkit/manager/state/raft/raft.go
+++ b/vendor/github.com/docker/swarmkit/manager/state/raft/raft.go
@@ -180,9 +180,12 @@
 	ClockSource clock.Clock
 	// SendTimeout is the timeout on the sending messages to other raft
 	// nodes. Leave this as 0 to get the default value.
-	SendTimeout    time.Duration
-	TLSCredentials credentials.TransportCredentials
-	KeyRotator     EncryptionKeyRotator
+	SendTimeout time.Duration
+	// LargeSendTimeout is the timeout on the sending snapshots to other raft
+	// nodes. Leave this as 0 to get the default value.
+	LargeSendTimeout time.Duration
+	TLSCredentials   credentials.TransportCredentials
+	KeyRotator       EncryptionKeyRotator
 	// DisableStackDump prevents Run from dumping goroutine stacks when the
 	// store becomes stuck.
 	DisableStackDump bool
@@ -204,6 +207,11 @@
 	if opts.SendTimeout == 0 {
 		opts.SendTimeout = 2 * time.Second
 	}
+	if opts.LargeSendTimeout == 0 {
+		// a "slow" 100Mbps connection can send over 240MB data in 20 seconds
+		// which is well over the gRPC message limit of 128MB allowed by SwarmKit
+		opts.LargeSendTimeout = 20 * time.Second
+	}
 
 	raftStore := raft.NewMemoryStorage()
 
@@ -349,6 +357,7 @@
 	transportConfig := &transport.Config{
 		HeartbeatInterval: time.Duration(n.Config.ElectionTick) * n.opts.TickInterval,
 		SendTimeout:       n.opts.SendTimeout,
+		LargeSendTimeout:  n.opts.LargeSendTimeout,
 		Credentials:       n.opts.TLSCredentials,
 		Raft:              n,
 	}
@@ -542,6 +551,7 @@
 		n.done()
 	}()
 
+	// Flag that indicates if this manager node is *currently* the raft leader.
 	wasLeader := false
 	transferLeadershipLimit := rate.NewLimiter(rate.Every(time.Minute), 1)
 
@@ -563,10 +573,13 @@
 				return errors.Wrap(err, "failed to save entries to storage")
 			}
 
+			// If the memory store lock has been held for too long,
+			// transferring leadership is an easy way to break out of it.
 			if wasLeader &&
 				(rd.SoftState == nil || rd.SoftState.RaftState == raft.StateLeader) &&
 				n.memoryStore.Wedged() &&
 				transferLeadershipLimit.Allow() {
+				log.G(ctx).Error("Attempting to transfer leadership")
 				if !n.opts.DisableStackDump {
 					signal.DumpStacks("")
 				}
@@ -612,6 +625,8 @@
 			if rd.SoftState != nil {
 				if wasLeader && rd.SoftState.RaftState != raft.StateLeader {
 					wasLeader = false
+					log.G(ctx).Error("soft state changed, node no longer a leader, resetting and cancelling all waits")
+
 					if atomic.LoadUint32(&n.signalledLeadership) == 1 {
 						atomic.StoreUint32(&n.signalledLeadership, 0)
 						n.leadershipBroadcast.Publish(IsFollower)
@@ -630,6 +645,7 @@
 					// cancelAll, or by its own check of signalledLeadership.
 					n.wait.cancelAll()
 				} else if !wasLeader && rd.SoftState.RaftState == raft.StateLeader {
+					// Node just became a leader.
 					wasLeader = true
 				}
 			}
@@ -1283,10 +1299,7 @@
 		return err
 	}
 	newAddr := net.JoinHostPort(newHost, officialPort)
-	if err := n.transport.UpdatePeerAddr(id, newAddr); err != nil {
-		return err
-	}
-	return nil
+	return n.transport.UpdatePeerAddr(id, newAddr)
 }
 
 // ProcessRaftMessage calls 'Step' which advances the
@@ -1481,7 +1494,7 @@
 	return nil
 }
 
-// ProposeValue calls Propose on the raft and waits
+// ProposeValue calls Propose on the underlying raft library (etcd/raft) and waits
 // on the commit log action before returning a result
 func (n *Node) ProposeValue(ctx context.Context, storeAction []api.StoreAction, cb func()) error {
 	ctx, cancel := n.WithContext(ctx)
@@ -1657,11 +1670,14 @@
 	return nil
 }
 
-// processInternalRaftRequest sends a message to nodes participating
-// in the raft to apply a log entry and then waits for it to be applied
-// on the server. It will block until the update is performed, there is
-// an error or until the raft node finalizes all the proposals on node
-// shutdown.
+// processInternalRaftRequest proposes a value to be appended to the raft log.
+// It calls Propose() on etcd/raft, which calls back into the raft FSM,
+// which then sends a message to each of the participating nodes
+// in the raft group to apply a log entry and then waits for it to be applied
+// on this node. It will block until this node:
+// 1. Gets the necessary replies back from the participating nodes and also performs the commit itself, or
+// 2. There is an error, or
+// 3. Until the raft node finalizes all the proposals on node shutdown.
 func (n *Node) processInternalRaftRequest(ctx context.Context, r *api.InternalRaftRequest, cb func()) (proto.Message, error) {
 	n.stopMu.RLock()
 	if !n.IsMember() {
@@ -1682,6 +1698,7 @@
 
 	// Do this check after calling register to avoid a race.
 	if atomic.LoadUint32(&n.signalledLeadership) != 1 {
+		log.G(ctx).Error("node is no longer leader, aborting propose")
 		n.wait.cancel(r.ID)
 		return nil, ErrLostLeadership
 	}
@@ -1706,14 +1723,23 @@
 	select {
 	case x, ok := <-ch:
 		if !ok {
+			// Wait notification channel was closed. This should only happen if the wait was cancelled.
+			log.G(ctx).Error("wait cancelled")
+			if atomic.LoadUint32(&n.signalledLeadership) == 1 {
+				log.G(ctx).Error("wait cancelled but node is still a leader")
+			}
 			return nil, ErrLostLeadership
 		}
 		return x.(proto.Message), nil
 	case <-waitCtx.Done():
 		n.wait.cancel(r.ID)
-		// if channel is closed, wait item was canceled, otherwise it was triggered
+		// If we can read from the channel, wait item was triggered. Otherwise it was cancelled.
 		x, ok := <-ch
 		if !ok {
+			log.G(ctx).WithError(waitCtx.Err()).Error("wait context cancelled")
+			if atomic.LoadUint32(&n.signalledLeadership) == 1 {
+				log.G(ctx).Error("wait context cancelled but node is still a leader")
+			}
 			return nil, ErrLostLeadership
 		}
 		return x.(proto.Message), nil
@@ -1782,21 +1808,26 @@
 	}
 
 	if !n.wait.trigger(r.ID, r) {
+		log.G(ctx).Errorf("wait not found for raft request id %x", r.ID)
+
 		// There was no wait on this ID, meaning we don't have a
 		// transaction in progress that would be committed to the
 		// memory store by the "trigger" call. Either a different node
 		// wrote this to raft, or we wrote it before losing the leader
-		// position and cancelling the transaction. Create a new
-		// transaction to commit the data.
+		// position and cancelling the transaction. This entry still needs
+		// to be committed since other nodes have already committed it.
+		// Create a new transaction to commit this entry.
 
 		// It should not be possible for processInternalRaftRequest
 		// to be running in this situation, but out of caution we
 		// cancel any current invocations to avoid a deadlock.
+		// TODO(anshul) This call is likely redundant, remove after consideration.
 		n.wait.cancelAll()
 
 		err := n.memoryStore.ApplyStoreActions(r.Action)
 		if err != nil {
 			log.G(ctx).WithError(err).Error("failed to apply actions from raft")
+			// TODO(anshul) return err here ?
 		}
 	}
 	return nil
@@ -1848,10 +1879,7 @@
 		return nil
 	}
 
-	if err = n.registerNode(member); err != nil {
-		return err
-	}
-	return nil
+	return n.registerNode(member)
 }
 
 // applyUpdateNode is called when we receive a ConfChange from a member in the
diff --git a/vendor/github.com/docker/swarmkit/manager/state/raft/storage/storage.go b/vendor/github.com/docker/swarmkit/manager/state/raft/storage/storage.go
index a737aab..539e05d 100644
--- a/vendor/github.com/docker/swarmkit/manager/state/raft/storage/storage.go
+++ b/vendor/github.com/docker/swarmkit/manager/state/raft/storage/storage.go
@@ -226,10 +226,7 @@
 	if err := snapshotter.SaveSnap(snapshot); err != nil {
 		return err
 	}
-	if err := e.wal.ReleaseLockTo(snapshot.Metadata.Index); err != nil {
-		return err
-	}
-	return nil
+	return e.wal.ReleaseLockTo(snapshot.Metadata.Index)
 }
 
 // GC garbage collects snapshots and wals older than the provided index and term
diff --git a/vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go b/vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go
index 55639af..8c7ca75 100644
--- a/vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go
+++ b/vendor/github.com/docker/swarmkit/manager/state/raft/transport/peer.go
@@ -133,7 +133,14 @@
 }
 
 func (p *peer) sendProcessMessage(ctx context.Context, m raftpb.Message) error {
-	ctx, cancel := context.WithTimeout(ctx, p.tr.config.SendTimeout)
+	timeout := p.tr.config.SendTimeout
+	// if a snapshot is being sent, set timeout to LargeSendTimeout because
+	// sending snapshots can take more time than other messages sent between peers.
+	// The same applies to AppendEntries as well, where messages can get large.
+	if m.Type == raftpb.MsgSnap || m.Type == raftpb.MsgApp {
+		timeout = p.tr.config.LargeSendTimeout
+	}
+	ctx, cancel := context.WithTimeout(ctx, timeout)
 	defer cancel()
 	_, err := api.NewRaftClient(p.conn()).ProcessRaftMessage(ctx, &api.ProcessRaftMessageRequest{Message: &m})
 	if grpc.Code(err) == codes.NotFound && grpc.ErrorDesc(err) == membership.ErrMemberRemoved.Error() {
diff --git a/vendor/github.com/docker/swarmkit/manager/state/raft/transport/transport.go b/vendor/github.com/docker/swarmkit/manager/state/raft/transport/transport.go
index 69057f6..6f096ef 100644
--- a/vendor/github.com/docker/swarmkit/manager/state/raft/transport/transport.go
+++ b/vendor/github.com/docker/swarmkit/manager/state/raft/transport/transport.go
@@ -3,6 +3,7 @@
 package transport
 
 import (
+	"net"
 	"sync"
 	"time"
 
@@ -35,6 +36,7 @@
 type Config struct {
 	HeartbeatInterval time.Duration
 	SendTimeout       time.Duration
+	LargeSendTimeout  time.Duration
 	Credentials       credentials.TransportCredentials
 	RaftID            string
 
@@ -235,10 +237,7 @@
 	if !ok {
 		return ErrIsNotFound
 	}
-	if err := p.updateAddr(addr); err != nil {
-		return err
-	}
-	return nil
+	return p.updateAddr(addr)
 }
 
 // PeerConn returns raw grpc connection to peer.
@@ -350,6 +349,13 @@
 		grpcOptions = append(grpcOptions, grpc.WithTimeout(t.config.SendTimeout))
 	}
 
+	// gRPC dialer connects to proxy first. Provide a custom dialer here to avoid that.
+	// TODO(anshul) Add an option to configure this.
+	grpcOptions = append(grpcOptions,
+		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+			return net.DialTimeout("tcp", addr, timeout)
+		}))
+
 	cc, err := grpc.Dial(addr, grpcOptions...)
 	if err != nil {
 		return nil, err
diff --git a/vendor/github.com/docker/swarmkit/manager/state/store/memory.go b/vendor/github.com/docker/swarmkit/manager/state/store/memory.go
index 62ab927..01245a6 100644
--- a/vendor/github.com/docker/swarmkit/manager/state/store/memory.go
+++ b/vendor/github.com/docker/swarmkit/manager/state/store/memory.go
@@ -83,8 +83,7 @@
 	schema.Tables[os.Table.Name] = os.Table
 }
 
-// timedMutex wraps a sync.Mutex, and keeps track of how long it has been
-// locked.
+// timedMutex wraps a sync.Mutex, and keeps track of when it was locked.
 type timedMutex struct {
 	sync.Mutex
 	lockedAt atomic.Value
diff --git a/vendor/github.com/docker/swarmkit/vendor.conf b/vendor/github.com/docker/swarmkit/vendor.conf
index abb1a6c..8949ea0 100644
--- a/vendor/github.com/docker/swarmkit/vendor.conf
+++ b/vendor/github.com/docker/swarmkit/vendor.conf
@@ -24,7 +24,7 @@
 github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
 github.com/docker/go-units 954fed01cc617c55d838fa2230073f2cb17386c8
 github.com/docker/libkv 9fd56606e928ff1f309808f5d5a0b7a2ef73f9a8
-github.com/docker/libnetwork 19ac3ea7f52bb46e0eb10669756cdae0c441a5b1 
+github.com/docker/libnetwork 21544598c53fa36a3c771a8725c643dd2340f845 
 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
 github.com/opencontainers/runc d40db12e72a40109dfcf28539f5ee0930d2f0277
 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 
diff --git a/vendor/github.com/moby/buildkit/README.md b/vendor/github.com/moby/buildkit/README.md
index ddcbb01..4101bf4 100644
--- a/vendor/github.com/moby/buildkit/README.md
+++ b/vendor/github.com/moby/buildkit/README.md
@@ -1,10 +1,16 @@
-### Important: This repository is in an early development phase and not suitable for practical workloads. It does not compare with `docker build` features yet.
+### Important: This repository is in an early development phase
 
 [![asciicinema example](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU.png)](https://asciinema.org/a/gPEIEo1NzmDTUu2bEPsUboqmU)
 
 
 ## BuildKit
 
+<!-- godoc is mainly for LLB stuff -->
+[![GoDoc](https://godoc.org/github.com/moby/buildkit?status.svg)](https://godoc.org/github.com/moby/buildkit/client/llb)
+[![Build Status](https://travis-ci.org/moby/buildkit.svg?branch=master)](https://travis-ci.org/moby/buildkit)
+[![Go Report Card](https://goreportcard.com/badge/github.com/moby/buildkit)](https://goreportcard.com/report/github.com/moby/buildkit)
+
+
 BuildKit is a toolkit for converting source code to build artifacts in an efficient, expressive and repeatable manner.
 
 Key features:
@@ -23,7 +29,7 @@
 
 #### Quick start
 
-BuildKit daemon can be built in two different versions: one that uses [containerd](https://github.com/containerd/containerd) for execution and distribution, and a standalone version that doesn't have other dependencies apart from [runc](https://github.com/opencontainers/runc). We are open for adding more backends. `buildd` is a CLI utility for running the gRPC API. 
+BuildKit daemon can be built in two different versions: one that uses [containerd](https://github.com/containerd/containerd) for execution and distribution, and a standalone version that doesn't have other dependencies apart from [runc](https://github.com/opencontainers/runc). We are open for adding more backends. `buildd` is a CLI utility for serving the gRPC API. 
 
 ```bash
 # buildd daemon (choose one)
@@ -36,17 +42,15 @@
 
 You can also use `make binaries` that prepares all binaries into the `bin/` directory.
 
-The first thing to test could be to try building BuildKit with BuildKit. BuildKit provides a low-level solver format that could be used by multiple build definitions. Preparation work for making the Dockerfile parser reusable as a frontend is tracked in https://github.com/moby/moby/pull/33492. As no frontends have been integrated yet we currently have to use a client library to generate this low-level definition.
-
 `examples/buildkit*` directory contains scripts that define how to build different configurations of BuildKit and its dependencies using the `client` package. Running one of these script generates a protobuf definition of a build graph. Note that the script itself does not execute any steps of the build.
 
-You can use `buildctl debug dump-llb` to see what data is this definition.
+You can use `buildctl debug dump-llb` to see what data is in this definition. Add `--dot` to generate dot layout.
 
 ```bash
 go run examples/buildkit0/buildkit.go | buildctl debug dump-llb | jq .
 ```
 
-To start building use `buildctl build` command. The script accepts `--target` flag to choose between `containerd` and `standalone` configurations. In standalone mode BuildKit binaries are built together with `runc`. In containerd mode, the `containerd` binary is built as well from the upstream repo.
+To start building use `buildctl build` command. The example script accepts `--target` flag to choose between `containerd` and `standalone` configurations. In standalone mode BuildKit binaries are built together with `runc`. In containerd mode, the `containerd` binary is built as well from the upstream repo.
 
 ```bash
 go run examples/buildkit0/buildkit.go | buildctl build
@@ -59,10 +63,52 @@
 - `./examples/buildkit0` - uses only exec operations, defines a full stage per component.
 - `./examples/buildkit1` - cloning git repositories has been separated for extra concurrency.
 - `./examples/buildkit2` - uses git sources directly instead of running `git clone`, allowing better performance and much safer caching.
+- `./examples/buildkit3` - allows using local source files for separate components eg. `./buildkit3 --runc=local | buildctl build --local runc-src=some/local/path`  
+- `./examples/dockerfile2llb` - can be used to convert a Dockerfile to LLB for debugging purposes
+- `./examples/gobuild` - shows how to use nested invocation to generate LLB for Go package internal dependencies
+
+
+#### Examples
+
+##### Starting the buildd daemon:
+
+```
+buildd-standalone --debug --root /var/lib/buildkit
+```
+
+##### Building a Dockerfile:
+
+```
+buildctl build --frontend=dockerfile.v0 --local context=. --local dockerfile=.
+```
+
+`context` and `dockerfile` should point to local directories for build context and Dockerfile location.
+
+
+##### Exporting resulting image to containerd
+
+Containerd version of buildd needs to be used
+
+```
+buildctl build ... --exporter=image --exporter-opt name=docker.io/username/image
+ctr --namespace=buildkit images ls
+```
+
+##### Exporting build result back to client
+
+```
+buildctl build ... --exporter=local --exporter-opt output=path/to/output-dir
+```
+
+#### View build cache
+
+```
+buildctl du -v
+```
 
 #### Supported runc version
 
-During development buildkit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/3707703a694187c7d08e2f333da6ddd58bcb729d/RUNC.md) for more information.
+During development buildkit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/d1e11f17ec7b325f89608dd46c128300b8727d50/RUNC.md) for more information.
 
 
 #### Contributing
diff --git a/vendor/github.com/moby/buildkit/session/context.go b/vendor/github.com/moby/buildkit/session/context.go
new file mode 100644
index 0000000..31a29f0
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/session/context.go
@@ -0,0 +1,22 @@
+package session
+
+import "context"
+
+type contextKeyT string
+
+var contextKey = contextKeyT("buildkit/session-id")
+
+func NewContext(ctx context.Context, id string) context.Context {
+	if id != "" {
+		return context.WithValue(ctx, contextKey, id)
+	}
+	return ctx
+}
+
+func FromContext(ctx context.Context) string {
+	v := ctx.Value(contextKey)
+	if v == nil {
+		return ""
+	}
+	return v.(string)
+}
diff --git a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go
index 58b2968..c5a3b5b 100644
--- a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go
+++ b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go
@@ -1,31 +1,55 @@
 package filesync
 
 import (
+	"os"
 	"time"
 
-	"google.golang.org/grpc"
-
 	"github.com/sirupsen/logrus"
 	"github.com/tonistiigi/fsutil"
+	"google.golang.org/grpc"
 )
 
-func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error {
+func sendDiffCopy(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error {
 	return fsutil.Send(stream.Context(), stream, dir, &fsutil.WalkOpt{
 		ExcludePatterns: excludes,
-		IncludePaths:    includes, // TODO: rename IncludePatterns
+		IncludePatterns: includes,
+		Map:             _map,
 	}, progress)
 }
 
-func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater) error {
+func recvDiffCopy(ds grpc.Stream, dest string, cu CacheUpdater, progress progressCb) error {
 	st := time.Now()
 	defer func() {
 		logrus.Debugf("diffcopy took: %v", time.Since(st))
 	}()
 	var cf fsutil.ChangeFunc
+	var ch fsutil.ContentHasher
 	if cu != nil {
 		cu.MarkSupported(true)
 		cf = cu.HandleChange
+		ch = cu.ContentHasher()
 	}
+	return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{
+		NotifyHashed:  cf,
+		ContentHasher: ch,
+		ProgressCb:    progress,
+	})
+}
 
-	return fsutil.Receive(ds.Context(), ds, dest, cf)
+func syncTargetDiffCopy(ds grpc.Stream, dest string) error {
+	if err := os.MkdirAll(dest, 0700); err != nil {
+		return err
+	}
+	return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{
+		Merge: true,
+		Filter: func() func(*fsutil.Stat) bool {
+			uid := os.Getuid()
+			gid := os.Getgid()
+			return func(st *fsutil.Stat) bool {
+				st.Uid = uint32(uid)
+				st.Gid = uint32(gid)
+				return true
+			}
+		}(),
+	})
 }
diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.go
index fe4d00a..5642f07 100644
--- a/vendor/github.com/moby/buildkit/session/filesync/filesync.go
+++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.go
@@ -1,6 +1,7 @@
 package filesync
 
 import (
+	"fmt"
 	"os"
 	"strings"
 
@@ -15,20 +16,29 @@
 const (
 	keyOverrideExcludes = "override-excludes"
 	keyIncludePatterns  = "include-patterns"
+	keyDirName          = "dir-name"
 )
 
 type fsSyncProvider struct {
-	root     string
-	excludes []string
-	p        progressCb
-	doneCh   chan error
+	dirs   map[string]SyncedDir
+	p      progressCb
+	doneCh chan error
+}
+
+type SyncedDir struct {
+	Name     string
+	Dir      string
+	Excludes []string
+	Map      func(*fsutil.Stat) bool
 }
 
 // NewFSSyncProvider creates a new provider for sending files from client
-func NewFSSyncProvider(root string, excludes []string) session.Attachable {
+func NewFSSyncProvider(dirs []SyncedDir) session.Attachable {
 	p := &fsSyncProvider{
-		root:     root,
-		excludes: excludes,
+		dirs: map[string]SyncedDir{},
+	}
+	for _, d := range dirs {
+		p.dirs[d.Name] = d
 	}
 	return p
 }
@@ -58,9 +68,19 @@
 
 	opts, _ := metadata.FromContext(stream.Context()) // if no metadata continue with empty object
 
+	name, ok := opts[keyDirName]
+	if !ok || len(name) != 1 {
+		return errors.New("no dir name in request")
+	}
+
+	dir, ok := sp.dirs[name[0]]
+	if !ok {
+		return errors.Errorf("no access allowed to dir %q", name[0])
+	}
+
 	var excludes []string
 	if len(opts[keyOverrideExcludes]) == 0 || opts[keyOverrideExcludes][0] != "true" {
-		excludes = sp.excludes
+		excludes = dir.Excludes
 	}
 	includes := opts[keyIncludePatterns]
 
@@ -75,7 +95,7 @@
 		doneCh = sp.doneCh
 		sp.doneCh = nil
 	}
-	err := pr.sendFn(stream, sp.root, includes, excludes, progress)
+	err := pr.sendFn(stream, dir.Dir, includes, excludes, progress, dir.Map)
 	if doneCh != nil {
 		if err != nil {
 			doneCh <- err
@@ -94,8 +114,8 @@
 
 type protocol struct {
 	name   string
-	sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb) error
-	recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater) error
+	sendFn func(stream grpc.Stream, srcDir string, includes, excludes []string, progress progressCb, _map func(*fsutil.Stat) bool) error
+	recvFn func(stream grpc.Stream, destDir string, cu CacheUpdater, progress progressCb) error
 }
 
 func isProtoSupported(p string) bool {
@@ -112,25 +132,23 @@
 		sendFn: sendDiffCopy,
 		recvFn: recvDiffCopy,
 	},
-	{
-		name:   "tarstream",
-		sendFn: sendTarStream,
-		recvFn: recvTarStream,
-	},
 }
 
 // FSSendRequestOpt defines options for FSSend request
 type FSSendRequestOpt struct {
+	Name             string
 	IncludePatterns  []string
 	OverrideExcludes bool
 	DestDir          string
 	CacheUpdater     CacheUpdater
+	ProgressCb       func(int, bool)
 }
 
 // CacheUpdater is an object capable of sending notifications for the cache hash changes
 type CacheUpdater interface {
 	MarkSupported(bool)
 	HandleChange(fsutil.ChangeKind, string, os.FileInfo, error) error
+	ContentHasher() fsutil.ContentHasher
 }
 
 // FSSync initializes a transfer of files
@@ -155,6 +173,8 @@
 		opts[keyIncludePatterns] = opt.IncludePatterns
 	}
 
+	opts[keyDirName] = []string{opt.Name}
+
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
@@ -177,7 +197,45 @@
 			return err
 		}
 		stream = cc
+	default:
+		panic(fmt.Sprintf("invalid protocol: %q", pr.name))
 	}
 
-	return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater)
+	return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb)
+}
+
+// NewFSSyncTarget allows writing into a directory
+func NewFSSyncTarget(outdir string) session.Attachable {
+	p := &fsSyncTarget{
+		outdir: outdir,
+	}
+	return p
+}
+
+type fsSyncTarget struct {
+	outdir string
+}
+
+func (sp *fsSyncTarget) Register(server *grpc.Server) {
+	RegisterFileSendServer(server, sp)
+}
+
+func (sp *fsSyncTarget) DiffCopy(stream FileSend_DiffCopyServer) error {
+	return syncTargetDiffCopy(stream, sp.outdir)
+}
+
+func CopyToCaller(ctx context.Context, srcPath string, c session.Caller, progress func(int, bool)) error {
+	method := session.MethodURL(_FileSend_serviceDesc.ServiceName, "diffcopy")
+	if !c.Supports(method) {
+		return errors.Errorf("method %s not supported by the client", method)
+	}
+
+	client := NewFileSendClient(c.Conn())
+
+	cc, err := client.DiffCopy(ctx)
+	if err != nil {
+		return err
+	}
+
+	return sendDiffCopy(cc, srcPath, nil, nil, progress, nil)
 }
diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go
index c6ed666..69c7888 100644
--- a/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go
+++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.pb.go
@@ -277,6 +277,102 @@
 	Metadata: "filesync.proto",
 }
 
+// Client API for FileSend service
+
+type FileSendClient interface {
+	DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error)
+}
+
+type fileSendClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewFileSendClient(cc *grpc.ClientConn) FileSendClient {
+	return &fileSendClient{cc}
+}
+
+func (c *fileSendClient) DiffCopy(ctx context.Context, opts ...grpc.CallOption) (FileSend_DiffCopyClient, error) {
+	stream, err := grpc.NewClientStream(ctx, &_FileSend_serviceDesc.Streams[0], c.cc, "/moby.filesync.v1.FileSend/DiffCopy", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &fileSendDiffCopyClient{stream}
+	return x, nil
+}
+
+type FileSend_DiffCopyClient interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ClientStream
+}
+
+type fileSendDiffCopyClient struct {
+	grpc.ClientStream
+}
+
+func (x *fileSendDiffCopyClient) Send(m *BytesMessage) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *fileSendDiffCopyClient) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+// Server API for FileSend service
+
+type FileSendServer interface {
+	DiffCopy(FileSend_DiffCopyServer) error
+}
+
+func RegisterFileSendServer(s *grpc.Server, srv FileSendServer) {
+	s.RegisterService(&_FileSend_serviceDesc, srv)
+}
+
+func _FileSend_DiffCopy_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(FileSendServer).DiffCopy(&fileSendDiffCopyServer{stream})
+}
+
+type FileSend_DiffCopyServer interface {
+	Send(*BytesMessage) error
+	Recv() (*BytesMessage, error)
+	grpc.ServerStream
+}
+
+type fileSendDiffCopyServer struct {
+	grpc.ServerStream
+}
+
+func (x *fileSendDiffCopyServer) Send(m *BytesMessage) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *fileSendDiffCopyServer) Recv() (*BytesMessage, error) {
+	m := new(BytesMessage)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+var _FileSend_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "moby.filesync.v1.FileSend",
+	HandlerType: (*FileSendServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "DiffCopy",
+			Handler:       _FileSend_DiffCopy_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "filesync.proto",
+}
+
 func (m *BytesMessage) Marshal() (dAtA []byte, err error) {
 	size := m.Size()
 	dAtA = make([]byte, size)
@@ -558,7 +654,7 @@
 func init() { proto.RegisterFile("filesync.proto", fileDescriptorFilesync) }
 
 var fileDescriptorFilesync = []byte{
-	// 198 bytes of a gzipped FileDescriptorProto
+	// 208 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0xcb, 0xcc, 0x49,
 	0x2d, 0xae, 0xcc, 0x4b, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xc8, 0xcd, 0x4f, 0xaa,
 	0xd4, 0x83, 0x0b, 0x96, 0x19, 0x2a, 0x29, 0x71, 0xf1, 0x38, 0x55, 0x96, 0xa4, 0x16, 0xfb, 0xa6,
@@ -566,10 +662,10 @@
 	0x30, 0x6a, 0xf0, 0x04, 0x81, 0xd9, 0x46, 0xab, 0x19, 0xb9, 0x38, 0xdc, 0x32, 0x73, 0x52, 0x83,
 	0x2b, 0xf3, 0x92, 0x85, 0xfc, 0xb8, 0x38, 0x5c, 0x32, 0xd3, 0xd2, 0x9c, 0xf3, 0x0b, 0x2a, 0x85,
 	0xe4, 0xf4, 0xd0, 0xcd, 0xd3, 0x43, 0x36, 0x4c, 0x8a, 0x80, 0xbc, 0x06, 0xa3, 0x01, 0xa3, 0x90,
-	0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x74, 0x32,
-	0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9,
-	0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e,
-	0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51,
-	0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0, 0x41, 0x64, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x0c,
-	0x8d, 0xc5, 0x34, 0x01, 0x00, 0x00,
+	0x3f, 0x17, 0x67, 0x48, 0x62, 0x51, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x35, 0x0c, 0x34, 0x8a,
+	0x82, 0x3a, 0x36, 0x35, 0x2f, 0x85, 0xda, 0x8e, 0x75, 0x32, 0xbb, 0xf0, 0x50, 0x8e, 0xe1, 0xc6,
+	0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78,
+	0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7,
+	0xf0, 0xe1, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x51, 0x1c, 0x30, 0xb3, 0x92, 0xd8, 0xc0,
+	0xc1, 0x6f, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x72, 0x81, 0x1a, 0x91, 0x90, 0x01, 0x00, 0x00,
 }
diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.proto b/vendor/github.com/moby/buildkit/session/filesync/filesync.proto
index 2fd5b3e..0ae2937 100644
--- a/vendor/github.com/moby/buildkit/session/filesync/filesync.proto
+++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.proto
@@ -9,6 +9,11 @@
   rpc TarStream(stream BytesMessage) returns (stream BytesMessage);
 }
 
+service FileSend{
+  rpc DiffCopy(stream BytesMessage) returns (stream BytesMessage);
+}
+
+
 // BytesMessage contains a chunk of byte data
 message BytesMessage{
 	bytes data = 1;
diff --git a/vendor/github.com/moby/buildkit/session/filesync/tarstream.go b/vendor/github.com/moby/buildkit/session/filesync/tarstream.go
deleted file mode 100644
index 5cab867..0000000
--- a/vendor/github.com/moby/buildkit/session/filesync/tarstream.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package filesync
-
-import (
-	"io"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
-	"google.golang.org/grpc"
-)
-
-func sendTarStream(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error {
-	a, err := archive.TarWithOptions(dir, &archive.TarOptions{
-		ExcludePatterns: excludes,
-	})
-	if err != nil {
-		return err
-	}
-
-	size := 0
-	buf := make([]byte, 1<<15)
-	t := new(BytesMessage)
-	for {
-		n, err := a.Read(buf)
-		if err != nil {
-			if err == io.EOF {
-				break
-			}
-			return err
-		}
-		t.Data = buf[:n]
-
-		if err := stream.SendMsg(t); err != nil {
-			return err
-		}
-		size += n
-		if progress != nil {
-			progress(size, false)
-		}
-	}
-	if progress != nil {
-		progress(size, true)
-	}
-	return nil
-}
-
-func recvTarStream(ds grpc.Stream, dest string, cs CacheUpdater) error {
-
-	pr, pw := io.Pipe()
-
-	go func() {
-		var (
-			err error
-			t   = new(BytesMessage)
-		)
-		for {
-			if err = ds.RecvMsg(t); err != nil {
-				if err == io.EOF {
-					err = nil
-				}
-				break
-			}
-			_, err = pw.Write(t.Data)
-			if err != nil {
-				break
-			}
-		}
-		if err = pw.CloseWithError(err); err != nil {
-			logrus.Errorf("failed to close tar transfer pipe")
-		}
-	}()
-
-	decompressedStream, err := archive.DecompressStream(pr)
-	if err != nil {
-		return errors.Wrap(err, "failed to decompress stream")
-	}
-
-	if err := chrootarchive.Untar(decompressedStream, dest, nil); err != nil {
-		return errors.Wrap(err, "failed to untar context")
-	}
-	return nil
-}
diff --git a/vendor/github.com/moby/buildkit/session/manager.go b/vendor/github.com/moby/buildkit/session/manager.go
index 9523e6f..b3e5955 100644
--- a/vendor/github.com/moby/buildkit/session/manager.go
+++ b/vendor/github.com/moby/buildkit/session/manager.go
@@ -49,14 +49,14 @@
 		return errors.New("handler does not support hijack")
 	}
 
-	uuid := r.Header.Get(headerSessionUUID)
+	id := r.Header.Get(headerSessionID)
 
 	proto := r.Header.Get("Upgrade")
 
 	sm.mu.Lock()
-	if _, ok := sm.sessions[uuid]; ok {
+	if _, ok := sm.sessions[id]; ok {
 		sm.mu.Unlock()
-		return errors.Errorf("session %s already exists", uuid)
+		return errors.Errorf("session %s already exists", id)
 	}
 
 	if proto == "" {
@@ -102,8 +102,10 @@
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
+	opts = canonicalHeaders(opts)
+
 	h := http.Header(opts)
-	uuid := h.Get(headerSessionUUID)
+	id := h.Get(headerSessionID)
 	name := h.Get(headerSessionName)
 	sharedKey := h.Get(headerSessionSharedKey)
 
@@ -115,7 +117,7 @@
 
 	c := &client{
 		Session: Session{
-			uuid:      uuid,
+			id:        id,
 			name:      name,
 			sharedKey: sharedKey,
 			ctx:       ctx,
@@ -129,13 +131,13 @@
 	for _, m := range opts[headerSessionMethod] {
 		c.supported[strings.ToLower(m)] = struct{}{}
 	}
-	sm.sessions[uuid] = c
+	sm.sessions[id] = c
 	sm.updateCondition.Broadcast()
 	sm.mu.Unlock()
 
 	defer func() {
 		sm.mu.Lock()
-		delete(sm.sessions, uuid)
+		delete(sm.sessions, id)
 		sm.mu.Unlock()
 	}()
 
@@ -146,8 +148,8 @@
 	return nil
 }
 
-// Get returns a session by UUID
-func (sm *Manager) Get(ctx context.Context, uuid string) (Caller, error) {
+// Get returns a session by ID
+func (sm *Manager) Get(ctx context.Context, id string) (Caller, error) {
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
@@ -165,11 +167,11 @@
 		select {
 		case <-ctx.Done():
 			sm.mu.Unlock()
-			return nil, errors.Wrapf(ctx.Err(), "no active session for %s", uuid)
+			return nil, errors.Wrapf(ctx.Err(), "no active session for %s", id)
 		default:
 		}
 		var ok bool
-		c, ok = sm.sessions[uuid]
+		c, ok = sm.sessions[id]
 		if !ok || c.closed() {
 			sm.updateCondition.Wait()
 			continue
@@ -200,3 +202,11 @@
 func (c *client) Conn() *grpc.ClientConn {
 	return c.cc
 }
+
+func canonicalHeaders(in map[string][]string) map[string][]string {
+	out := map[string][]string{}
+	for k := range in {
+		out[http.CanonicalHeaderKey(k)] = in[k]
+	}
+	return out
+}
diff --git a/vendor/github.com/moby/buildkit/session/session.go b/vendor/github.com/moby/buildkit/session/session.go
index 147486a..454c3d7 100644
--- a/vendor/github.com/moby/buildkit/session/session.go
+++ b/vendor/github.com/moby/buildkit/session/session.go
@@ -12,7 +12,7 @@
 )
 
 const (
-	headerSessionUUID      = "X-Docker-Expose-Session-Uuid"
+	headerSessionID        = "X-Docker-Expose-Session-Uuid"
 	headerSessionName      = "X-Docker-Expose-Session-Name"
 	headerSessionSharedKey = "X-Docker-Expose-Session-Sharedkey"
 	headerSessionMethod    = "X-Docker-Expose-Session-Grpc-Method"
@@ -28,7 +28,7 @@
 
 // Session is a long running connection between client and a daemon
 type Session struct {
-	uuid       string
+	id         string
 	name       string
 	sharedKey  string
 	ctx        context.Context
@@ -39,9 +39,9 @@
 
 // NewSession returns a new long running session
 func NewSession(name, sharedKey string) (*Session, error) {
-	uuid := stringid.GenerateRandomID()
+	id := stringid.GenerateRandomID()
 	s := &Session{
-		uuid:       uuid,
+		id:         id,
 		name:       name,
 		sharedKey:  sharedKey,
 		grpcServer: grpc.NewServer(),
@@ -57,9 +57,9 @@
 	a.Register(s.grpcServer)
 }
 
-// UUID returns unique identifier for the session
-func (s *Session) UUID() string {
-	return s.uuid
+// ID returns unique identifier for the session
+func (s *Session) ID() string {
+	return s.id
 }
 
 // Run activates the session
@@ -72,7 +72,7 @@
 	defer close(s.done)
 
 	meta := make(map[string][]string)
-	meta[headerSessionUUID] = []string{s.uuid}
+	meta[headerSessionID] = []string{s.id}
 	meta[headerSessionName] = []string{s.name}
 	meta[headerSessionSharedKey] = []string{s.sharedKey}
 
@@ -92,6 +92,7 @@
 // Close closes the session
 func (s *Session) Close() error {
 	if s.cancelCtx != nil && s.done != nil {
+		s.grpcServer.Stop()
 		s.cancelCtx()
 		<-s.done
 	}
diff --git a/vendor/github.com/moby/buildkit/vendor.conf b/vendor/github.com/moby/buildkit/vendor.conf
index b13cfa9..f3760bd 100644
--- a/vendor/github.com/moby/buildkit/vendor.conf
+++ b/vendor/github.com/moby/buildkit/vendor.conf
@@ -6,26 +6,26 @@
 github.com/pmezard/go-difflib v1.0.0
 golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
 
-github.com/containerd/containerd 3707703a694187c7d08e2f333da6ddd58bcb729d
-golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
-github.com/Sirupsen/logrus v0.11.0
+github.com/containerd/containerd d1e11f17ec7b325f89608dd46c128300b8727d50
+golang.org/x/sync f52d1811a62927559de87708c8913c1650ce4f26
+github.com/sirupsen/logrus v1.0.0
 google.golang.org/grpc v1.3.0
 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
 golang.org/x/net 1f9224279e98554b6a6432d4dd998a739f8b2b7c
 github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8
 github.com/golang/protobuf 5a0f697c9ed9d68fef0116532c6e05cfeae00e55
 github.com/containerd/continuity 86cec1535a968310e7532819f699ff2830ed7463
-github.com/opencontainers/image-spec v1.0.0-rc6
-github.com/opencontainers/runc 429a5387123625040bacfbb60d96b1cbd02293ab
+github.com/opencontainers/image-spec v1.0.0
+github.com/opencontainers/runc e775f0fba3ea329b8b766451c892c41a3d49594d
 github.com/Microsoft/go-winio v0.4.1
 github.com/containerd/fifo 69b99525e472735860a5269b75af1970142b3062
-github.com/opencontainers/runtime-spec 198f23f827eea397d4331d7eb048d9d4c7ff7bee
+github.com/opencontainers/runtime-spec 96de01bbb42c7af89bff100e10a9f0fb62e75bfb
 github.com/containerd/go-runc 2774a2ea124a5c2d0aba13b5c2dd8a5a9a48775d
 github.com/containerd/console 7fed77e673ca4abcd0cbd6d4d0e0e22137cbd778
-github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e
+github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
 google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
-github.com/docker/go-events aa2e3b613fbbfdddbe055a7b9e3ce271cfd83eca
+github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
 
 github.com/urfave/cli d70f47eeca3afd795160003bc6e28b001d60c67c
 github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52
@@ -33,8 +33,14 @@
 golang.org/x/time 8be79e1e0910c292df4e79c241bb7e8f7e725959
 
 github.com/BurntSushi/locker 392720b78f44e9d0249fcac6c43b111b47a370b8
-github.com/docker/docker 05c7c311390911daebcf5d9519dee813fc02a887
+github.com/docker/docker 6f723db8c6f0c7f0b252674a9673a25b5978db04 https://github.com/tonistiigi/docker.git
 github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f
 
-github.com/tonistiigi/fsutil 0ac4c11b053b9c5c7c47558f81f96c7100ce50fb
+github.com/tonistiigi/fsutil 1dedf6e90084bd88c4c518a15e68a37ed1370203
 github.com/stevvooe/continuity 86cec1535a968310e7532819f699ff2830ed7463
+github.com/dmcgowan/go-tar 2e2c51242e8993c50445dab7c03c8e7febddd0cf
+github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
+github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
+github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
+github.com/docker/go-connections 3ede32e2033de7505e6500d6c868c2b9ed9f169d
+github.com/docker/distribution 30578ca32960a4d368bf6db67b0a33c2a1f3dc6f
diff --git a/vendor/github.com/opencontainers/image-spec/identity/chainid.go b/vendor/github.com/opencontainers/image-spec/identity/chainid.go
new file mode 100644
index 0000000..0bb2853
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/identity/chainid.go
@@ -0,0 +1,67 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package identity provides implementations of subtle calculations pertaining
+// to image and layer identity.  The primary item present here is the ChainID
+// calculation used in identifying the result of subsequent layer applications.
+//
+// Helpers are also provided here to ease transition to the
+// github.com/opencontainers/go-digest package, but that package may be used
+// directly.
+package identity
+
+import "github.com/opencontainers/go-digest"
+
+// ChainID takes a slice of digests and returns the ChainID corresponding to
+// the last entry. Typically, these are a list of layer DiffIDs, with the
+// result providing the ChainID identifying the result of sequential
+// application of the preceding layers.
+func ChainID(dgsts []digest.Digest) digest.Digest {
+	chainIDs := make([]digest.Digest, len(dgsts))
+	copy(chainIDs, dgsts)
+	ChainIDs(chainIDs)
+
+	if len(chainIDs) == 0 {
+		return ""
+	}
+	return chainIDs[len(chainIDs)-1]
+}
+
+// ChainIDs calculates the recursively applied chain id for each identifier in
+// the slice. The result is written direcly back into the slice such that the
+// ChainID for each item will be in the respective position.
+//
+// By definition of ChainID, the zeroth element will always be the same before
+// and after the call.
+//
+// As an example, given the chain of ids `[A, B, C]`, the result `[A,
+// ChainID(A|B), ChainID(A|B|C)]` will be written back to the slice.
+//
+// The input is provided as a return value for convenience.
+//
+// Typically, these are a list of layer DiffIDs, with the
+// result providing the ChainID for each the result of each layer application
+// sequentially.
+func ChainIDs(dgsts []digest.Digest) []digest.Digest {
+	if len(dgsts) < 2 {
+		return dgsts
+	}
+
+	parent := digest.FromBytes([]byte(dgsts[0] + " " + dgsts[1]))
+	next := dgsts[1:]
+	next[0] = parent
+	ChainIDs(next)
+
+	return dgsts
+}
diff --git a/vendor/github.com/opencontainers/image-spec/identity/helpers.go b/vendor/github.com/opencontainers/image-spec/identity/helpers.go
new file mode 100644
index 0000000..9d96eaa
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/identity/helpers.go
@@ -0,0 +1,40 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package identity
+
+import (
+	_ "crypto/sha256" // side-effect to install impls, sha256
+	_ "crypto/sha512" // side-effect to install impls, sha384/sh512
+
+	"io"
+
+	digest "github.com/opencontainers/go-digest"
+)
+
+// FromReader consumes the content of rd until io.EOF, returning canonical
+// digest.
+func FromReader(rd io.Reader) (digest.Digest, error) {
+	return digest.Canonical.FromReader(rd)
+}
+
+// FromBytes digests the input and returns a Digest.
+func FromBytes(p []byte) digest.Digest {
+	return digest.Canonical.FromBytes(p)
+}
+
+// FromString digests the input and returns a Digest.
+func FromString(s string) digest.Digest {
+	return digest.Canonical.FromString(s)
+}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
index 8475ff7..fe799bd 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
@@ -37,7 +37,7 @@
 	// Cmd defines the default arguments to the entrypoint of the container.
 	Cmd []string `json:"Cmd,omitempty"`
 
-	// Volumes is a set of directories which should be created as data volumes in a container running this image.
+	// Volumes is a set of directories describing where the process is likely write data specific to a container instance.
 	Volumes map[string]struct{} `json:"Volumes,omitempty"`
 
 	// WorkingDir sets the current working directory of the entrypoint process in the container.
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
index f4cda6e..e3eee29 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
@@ -25,7 +25,7 @@
 	VersionPatch = 0
 
 	// VersionDev indicates development branch. Releases will be empty string.
-	VersionDev = "-rc6-dev"
+	VersionDev = ""
 )
 
 // Version is the specification version that the package types support.
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
index 269ffff..3cae4fd 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/config.go
@@ -187,6 +187,10 @@
 
 	// Rootless specifies whether the container is a rootless container.
 	Rootless bool `json:"rootless"`
+
+	// IntelRdt specifies settings for Intel RDT/CAT group that the container is placed into
+	// to limit the resources (e.g., L3 cache) the container has available
+	IntelRdt *IntelRdt `json:"intel_rdt,omitempty"`
 }
 
 type Hooks struct {
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go
new file mode 100644
index 0000000..36bd5f9
--- /dev/null
+++ b/vendor/github.com/opencontainers/runc/libcontainer/configs/intelrdt.go
@@ -0,0 +1,7 @@
+package configs
+
+type IntelRdt struct {
+	// The schema for L3 cache id and capacity bitmask (CBM)
+	// Format: "L3:<cache_id0>=<cbm0>;<cache_id1>=<cbm1>;..."
+	L3CacheSchema string `json:"l3_cache_schema,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
index 6814a5a..a6a107e 100644
--- a/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
+++ b/vendor/github.com/opencontainers/runc/libcontainer/nsenter/nsexec.c
@@ -1,3 +1,4 @@
+
 #define _GNU_SOURCE
 #include <endian.h>
 #include <errno.h>
@@ -19,6 +20,8 @@
 #include <sys/prctl.h>
 #include <sys/socket.h>
 #include <sys/types.h>
+#include <sys/wait.h>
+
 
 #include <linux/limits.h>
 #include <linux/netlink.h>
@@ -64,7 +67,13 @@
 
 struct nlconfig_t {
 	char *data;
+
+	/* Process settings. */
 	uint32_t cloneflags;
+	char *oom_score_adj;
+	size_t oom_score_adj_len;
+
+	/* User namespace settings.*/
 	char *uidmap;
 	size_t uidmap_len;
 	char *gidmap;
@@ -72,9 +81,13 @@
 	char *namespaces;
 	size_t namespaces_len;
 	uint8_t is_setgroup;
+
+	/* Rootless container settings.*/
 	uint8_t is_rootless;
-	char *oom_score_adj;
-	size_t oom_score_adj_len;
+	char *uidmappath;
+	size_t uidmappath_len;
+	char *gidmappath;
+	size_t gidmappath_len;
 };
 
 /*
@@ -89,6 +102,8 @@
 #define SETGROUP_ATTR		27285
 #define OOM_SCORE_ADJ_ATTR	27286
 #define ROOTLESS_ATTR	    27287
+#define UIDMAPPATH_ATTR	    27288
+#define GIDMAPPATH_ATTR	    27289
 
 /*
  * Use the raw syscall for versions of glibc which don't include a function for
@@ -191,22 +206,96 @@
 	}
 }
 
-static void update_uidmap(int pid, char *map, size_t map_len)
+static int try_mapping_tool(const char *app, int pid, char *map, size_t map_len)
 {
-	if (map == NULL || map_len <= 0)
-		return;
+	int child;
 
-	if (write_file(map, map_len, "/proc/%d/uid_map", pid) < 0)
-		bail("failed to update /proc/%d/uid_map", pid);
+	/*
+	 * If @app is NULL, execve will segfault. Just check it here and bail (if
+	 * we're in this path, the caller is already getting desparate and there
+	 * isn't a backup to this failing). This usually would be a configuration
+	 * or programming issue.
+	 */
+	if (!app)
+		bail("mapping tool not present");
+
+	child = fork();
+	if (child < 0)
+		bail("failed to fork");
+
+	if (!child) {
+#define MAX_ARGV 20
+		char *argv[MAX_ARGV];
+		char *envp[] = {NULL};
+		char pid_fmt[16];
+		int argc = 0;
+		char *next;
+
+		snprintf(pid_fmt, 16, "%d", pid);
+
+		argv[argc++] = (char *) app;
+		argv[argc++] = pid_fmt;
+		/*
+		 * Convert the map string into a list of argument that
+		 * newuidmap/newgidmap can understand.
+		 */
+
+		while (argc < MAX_ARGV) {
+			if (*map == '\0') {
+				argv[argc++] = NULL;
+				break;
+			}
+			argv[argc++] = map;
+			next = strpbrk(map, "\n ");
+			if (next == NULL)
+				break;
+			*next++ = '\0';
+			map = next + strspn(next, "\n ");
+		}
+
+		execve(app, argv, envp);
+		bail("failed to execv");
+	} else {
+		int status;
+
+		while (true) {
+			if (waitpid(child, &status, 0) < 0) {
+				if (errno == EINTR)
+					continue;
+				bail("failed to waitpid");
+			}
+			if (WIFEXITED(status) || WIFSIGNALED(status))
+				return WEXITSTATUS(status);
+		}
+	}
+
+	return -1;
 }
 
-static void update_gidmap(int pid, char *map, size_t map_len)
+static void update_uidmap(const char *path, int pid, char *map, size_t map_len)
 {
 	if (map == NULL || map_len <= 0)
 		return;
 
-	if (write_file(map, map_len, "/proc/%d/gid_map", pid) < 0)
-		bail("failed to update /proc/%d/gid_map", pid);
+	if (write_file(map, map_len, "/proc/%d/uid_map", pid) < 0) {
+		if (errno != EPERM)
+			bail("failed to update /proc/%d/uid_map", pid);
+		if (try_mapping_tool(path, pid, map, map_len))
+			bail("failed to use newuid map on %d", pid);
+	}
+}
+
+static void update_gidmap(const char *path, int pid, char *map, size_t map_len)
+{
+	if (map == NULL || map_len <= 0)
+		return;
+
+	if (write_file(map, map_len, "/proc/%d/gid_map", pid) < 0) {
+		if (errno != EPERM)
+			bail("failed to update /proc/%d/gid_map", pid);
+		if (try_mapping_tool(path, pid, map, map_len))
+			bail("failed to use newgid map on %d", pid);
+	}
 }
 
 static void update_oom_score_adj(char *data, size_t len)
@@ -350,6 +439,14 @@
 			config->gidmap = current;
 			config->gidmap_len = payload_len;
 			break;
+		case UIDMAPPATH_ATTR:
+			config->uidmappath = current;
+			config->uidmappath_len = payload_len;
+			break;
+		case GIDMAPPATH_ATTR:
+			config->gidmappath = current;
+			config->gidmappath_len = payload_len;
+			break;
 		case SETGROUP_ATTR:
 			config->is_setgroup = readint8(current);
 			break;
@@ -596,8 +693,8 @@
 						update_setgroups(child, SETGROUPS_DENY);
 
 					/* Set up mappings. */
-					update_uidmap(child, config.uidmap, config.uidmap_len);
-					update_gidmap(child, config.gidmap, config.gidmap_len);
+					update_uidmap(config.uidmappath, child, config.uidmap, config.uidmap_len);
+					update_gidmap(config.gidmappath, child, config.gidmap, config.gidmap_len);
 
 					s = SYNC_USERMAP_ACK;
 					if (write(syncfd, &s, sizeof(s)) != sizeof(s)) {
diff --git a/vendor/github.com/opencontainers/runc/vendor.conf b/vendor/github.com/opencontainers/runc/vendor.conf
index 9506b5c..1266ee4 100644
--- a/vendor/github.com/opencontainers/runc/vendor.conf
+++ b/vendor/github.com/opencontainers/runc/vendor.conf
@@ -18,4 +18,8 @@
 github.com/docker/docker 0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d
 github.com/docker/go-units v0.2.0
 github.com/urfave/cli d53eb991652b1d438abdd34ce4bfa3ef1539108e
-golang.org/x/sys 0e0164865330d5cf1c00247be08330bf96e2f87c https://github.com/golang/sys
+golang.org/x/sys 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce https://github.com/golang/sys
+
+# console dependencies
+github.com/containerd/console 84eeaae905fa414d03e07bcd6c8d3f19e7cf180e
+github.com/pkg/errors v0.8.0
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
index 569dcf0..c008a38 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/label/label_selinux.go
@@ -49,8 +49,10 @@
 				mcon[con[0]] = con[1]
 			}
 		}
+		_ = ReleaseLabel(processLabel)
 		processLabel = pcon.Get()
 		mountLabel = mcon.Get()
+		_ = ReserveLabel(processLabel)
 	}
 	return processLabel, mountLabel, nil
 }
diff --git a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
index 4cf2c45..de9316c 100644
--- a/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
+++ b/vendor/github.com/opencontainers/selinux/go-selinux/selinux.go
@@ -213,7 +213,7 @@
 	return lsetxattr(path, xattrNameSelinux, []byte(label), 0)
 }
 
-// Filecon returns the SELinux label for this path or returns an error.
+// FileLabel returns the SELinux label for this path or returns an error.
 func FileLabel(path string) (string, error) {
 	label, err := lgetxattr(path, xattrNameSelinux)
 	if err != nil {
@@ -331,7 +331,7 @@
 }
 
 /*
-SetEnforce sets the current SELinux mode Enforcing, Permissive.
+SetEnforceMode sets the current SELinux mode Enforcing, Permissive.
 Disabled is not valid, since this needs to be set at boot time.
 */
 func SetEnforceMode(mode int) error {
diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md
index 82aeb4e..4f5ce57 100644
--- a/vendor/github.com/sirupsen/logrus/README.md
+++ b/vendor/github.com/sirupsen/logrus/README.md
@@ -1,7 +1,7 @@
 # Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus)
 
 Logrus is a structured logger for Go (golang), completely API compatible with
-the standard library logger. [Godoc][godoc].
+the standard library logger.
 
 **Seeing weird case-sensitive problems?** It's in the past been possible to
 import Logrus as both upper- and lower-case. Due to the Go package environment,
@@ -372,6 +372,7 @@
 
 Third party logging formatters:
 
+* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can by parsed by Kubernetes and Google Container Engine.
 * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
 * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
 * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go
index 320e5d5..5bf582e 100644
--- a/vendor/github.com/sirupsen/logrus/entry.go
+++ b/vendor/github.com/sirupsen/logrus/entry.go
@@ -35,6 +35,7 @@
 	Time time.Time
 
 	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	// This field will be set on entry firing and the value will be equal to the one in Logger struct field.
 	Level Level
 
 	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go
index 1aeaa90..013183e 100644
--- a/vendor/github.com/sirupsen/logrus/exported.go
+++ b/vendor/github.com/sirupsen/logrus/exported.go
@@ -31,7 +31,7 @@
 func SetLevel(level Level) {
 	std.mu.Lock()
 	defer std.mu.Unlock()
-	std.setLevel(level)
+	std.SetLevel(level)
 }
 
 // GetLevel returns the standard logger level.
diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go
index b5fbe93..b183ff5 100644
--- a/vendor/github.com/sirupsen/logrus/formatter.go
+++ b/vendor/github.com/sirupsen/logrus/formatter.go
@@ -2,7 +2,7 @@
 
 import "time"
 
-const DefaultTimestampFormat = time.RFC3339
+const defaultTimestampFormat = time.RFC3339
 
 // The Formatter interface is used to implement a custom Formatter. It takes an
 // `Entry`. It exposes all the fields, including the default ones:
diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go
index e787ea1..fb01c1b 100644
--- a/vendor/github.com/sirupsen/logrus/json_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/json_formatter.go
@@ -6,8 +6,11 @@
 )
 
 type fieldKey string
+
+// FieldMap allows customization of the key names for default fields.
 type FieldMap map[fieldKey]string
 
+// Default key names for the default fields
 const (
 	FieldKeyMsg   = "msg"
 	FieldKeyLevel = "level"
@@ -22,6 +25,7 @@
 	return string(key)
 }
 
+// JSONFormatter formats logs into parsable json
 type JSONFormatter struct {
 	// TimestampFormat sets the format used for marshaling timestamps.
 	TimestampFormat string
@@ -29,7 +33,7 @@
 	// DisableTimestamp allows disabling automatic timestamps in output
 	DisableTimestamp bool
 
-	// FieldMap allows users to customize the names of keys for various fields.
+	// FieldMap allows users to customize the names of keys for default fields.
 	// As an example:
 	// formatter := &JSONFormatter{
 	//   	FieldMap: FieldMap{
@@ -41,6 +45,7 @@
 	FieldMap FieldMap
 }
 
+// Format renders a single log entry
 func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
 	data := make(Fields, len(entry.Data)+3)
 	for k, v := range entry.Data {
@@ -57,7 +62,7 @@
 
 	timestampFormat := f.TimestampFormat
 	if timestampFormat == "" {
-		timestampFormat = DefaultTimestampFormat
+		timestampFormat = defaultTimestampFormat
 	}
 
 	if !f.DisableTimestamp {
diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go
index 370fff5..2acab05 100644
--- a/vendor/github.com/sirupsen/logrus/logger.go
+++ b/vendor/github.com/sirupsen/logrus/logger.go
@@ -25,7 +25,7 @@
 	Formatter Formatter
 	// The logging level the logger should log at. This is typically (and defaults
 	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
-	// logged. `logrus.Debug` is useful in
+	// logged.
 	Level Level
 	// Used to sync writing to the log. Locking is enabled by Default
 	mu MutexWrap
@@ -312,6 +312,6 @@
 	return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
 }
 
-func (logger *Logger) setLevel(level Level) {
+func (logger *Logger) SetLevel(level Level) {
 	atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
 }
diff --git a/vendor/github.com/sirupsen/logrus/terminal_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_appengine.go
deleted file mode 100644
index e011a86..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_appengine.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build appengine
-
-package logrus
-
-import "io"
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
-	return true
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go
index 5f6be4d..d7b3893 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_bsd.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_bsd.go
@@ -3,8 +3,8 @@
 
 package logrus
 
-import "syscall"
+import "golang.org/x/sys/unix"
 
-const ioctlReadTermios = syscall.TIOCGETA
+const ioctlReadTermios = unix.TIOCGETA
 
-type Termios syscall.Termios
+type Termios unix.Termios
diff --git a/vendor/github.com/sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go
index 308160c..88d7298 100644
--- a/vendor/github.com/sirupsen/logrus/terminal_linux.go
+++ b/vendor/github.com/sirupsen/logrus/terminal_linux.go
@@ -7,8 +7,8 @@
 
 package logrus
 
-import "syscall"
+import "golang.org/x/sys/unix"
 
-const ioctlReadTermios = syscall.TCGETS
+const ioctlReadTermios = unix.TCGETS
 
-type Termios syscall.Termios
+type Termios unix.Termios
diff --git a/vendor/github.com/sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go
deleted file mode 100644
index 190297a..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_notwindows.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build linux darwin freebsd openbsd netbsd dragonfly
-// +build !appengine
-
-package logrus
-
-import (
-	"io"
-	"os"
-	"syscall"
-	"unsafe"
-)
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
-	var termios Termios
-	switch v := f.(type) {
-	case *os.File:
-		_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
-		return err == 0
-	default:
-		return false
-	}
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_solaris.go
deleted file mode 100644
index 3c86b1a..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_solaris.go
+++ /dev/null
@@ -1,21 +0,0 @@
-// +build solaris,!appengine
-
-package logrus
-
-import (
-	"io"
-	"os"
-
-	"golang.org/x/sys/unix"
-)
-
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
-	switch v := f.(type) {
-	case *os.File:
-		_, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
-		return err == nil
-	default:
-		return false
-	}
-}
diff --git a/vendor/github.com/sirupsen/logrus/terminal_windows.go b/vendor/github.com/sirupsen/logrus/terminal_windows.go
deleted file mode 100644
index 7a33630..0000000
--- a/vendor/github.com/sirupsen/logrus/terminal_windows.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows,!appengine
-
-package logrus
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"os"
-	"os/exec"
-	"strconv"
-	"strings"
-	"syscall"
-	"unsafe"
-)
-
-var kernel32 = syscall.NewLazyDLL("kernel32.dll")
-
-var (
-	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
-	procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
-)
-
-const (
-	enableProcessedOutput           = 0x0001
-	enableWrapAtEolOutput           = 0x0002
-	enableVirtualTerminalProcessing = 0x0004
-)
-
-func getVersion() (float64, error) {
-	stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
-	cmd := exec.Command("cmd", "ver")
-	cmd.Stdout = stdout
-	cmd.Stderr = stderr
-	err := cmd.Run()
-	if err != nil {
-		return -1, err
-	}
-	
-	// The output should be like "Microsoft Windows [Version XX.X.XXXXXX]"
-	version := strings.Replace(stdout.String(), "\n", "", -1)
-	version = strings.Replace(version, "\r\n", "", -1)
-
-	x1 := strings.Index(version, "[Version")
-
-	if x1 == -1 || strings.Index(version, "]") == -1 {
-		return -1, errors.New("Can't determine Windows version")
-	}
-
-	return strconv.ParseFloat(version[x1+9:x1+13], 64)
-}
-
-func init() {
-	ver, err := getVersion()
-	if err != nil {
-		return
-	}
-
-	// Activate Virtual Processing for Windows CMD
-	// Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
-	if ver >= 10 {
-		handle := syscall.Handle(os.Stderr.Fd())
-		procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing)
-	}
-}
-
-// IsTerminal returns true if stderr's file descriptor is a terminal.
-func IsTerminal(f io.Writer) bool {
-	switch v := f.(type) {
-	case *os.File:
-		var st uint32
-		r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
-		return r != 0 && e == 0
-	default:
-		return false
-	}
-}
diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go
index 26dcc15..be412aa 100644
--- a/vendor/github.com/sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/sirupsen/logrus/text_formatter.go
@@ -3,10 +3,14 @@
 import (
 	"bytes"
 	"fmt"
+	"io"
+	"os"
 	"sort"
 	"strings"
 	"sync"
 	"time"
+
+	"golang.org/x/crypto/ssh/terminal"
 )
 
 const (
@@ -14,7 +18,7 @@
 	red     = 31
 	green   = 32
 	yellow  = 33
-	blue    = 34
+	blue    = 36
 	gray    = 37
 )
 
@@ -26,6 +30,7 @@
 	baseTimestamp = time.Now()
 }
 
+// TextFormatter formats logs into text
 type TextFormatter struct {
 	// Set to true to bypass checking for a TTY before outputting colors.
 	ForceColors bool
@@ -52,10 +57,6 @@
 	// QuoteEmptyFields will wrap empty fields in quotes if true
 	QuoteEmptyFields bool
 
-	// QuoteCharacter can be set to the override the default quoting character "
-	// with something else. For example: ', or `.
-	QuoteCharacter string
-
 	// Whether the logger's out is to a terminal
 	isTerminal bool
 
@@ -63,14 +64,21 @@
 }
 
 func (f *TextFormatter) init(entry *Entry) {
-	if len(f.QuoteCharacter) == 0 {
-		f.QuoteCharacter = "\""
-	}
 	if entry.Logger != nil {
-		f.isTerminal = IsTerminal(entry.Logger.Out)
+		f.isTerminal = f.checkIfTerminal(entry.Logger.Out)
 	}
 }
 
+func (f *TextFormatter) checkIfTerminal(w io.Writer) bool {
+	switch v := w.(type) {
+	case *os.File:
+		return terminal.IsTerminal(int(v.Fd()))
+	default:
+		return false
+	}
+}
+
+// Format renders a single log entry
 func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
 	var b *bytes.Buffer
 	keys := make([]string, 0, len(entry.Data))
@@ -95,7 +103,7 @@
 
 	timestampFormat := f.TimestampFormat
 	if timestampFormat == "" {
-		timestampFormat = DefaultTimestampFormat
+		timestampFormat = defaultTimestampFormat
 	}
 	if isColored {
 		f.printColored(b, entry, keys, timestampFormat)
@@ -153,7 +161,7 @@
 		if !((ch >= 'a' && ch <= 'z') ||
 			(ch >= 'A' && ch <= 'Z') ||
 			(ch >= '0' && ch <= '9') ||
-			ch == '-' || ch == '.') {
+			ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
 			return true
 		}
 	}
@@ -161,36 +169,23 @@
 }
 
 func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
-
+	if b.Len() > 0 {
+		b.WriteByte(' ')
+	}
 	b.WriteString(key)
 	b.WriteByte('=')
 	f.appendValue(b, value)
-	b.WriteByte(' ')
 }
 
 func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
-	switch value := value.(type) {
-	case string:
-		if !f.needsQuoting(value) {
-			b.WriteString(value)
-		} else {
-			b.WriteString(f.quoteString(value))
-		}
-	case error:
-		errmsg := value.Error()
-		if !f.needsQuoting(errmsg) {
-			b.WriteString(errmsg)
-		} else {
-			b.WriteString(f.quoteString(errmsg))
-		}
-	default:
-		fmt.Fprint(b, value)
+	stringVal, ok := value.(string)
+	if !ok {
+		stringVal = fmt.Sprint(value)
 	}
-}
 
-func (f *TextFormatter) quoteString(v string) string {
-	escapedQuote := fmt.Sprintf("\\%s", f.QuoteCharacter)
-	escapedValue := strings.Replace(v, f.QuoteCharacter, escapedQuote, -1)
-
-	return fmt.Sprintf("%s%v%s", f.QuoteCharacter, escapedValue, f.QuoteCharacter)
+	if !f.needsQuoting(stringVal) {
+		b.WriteString(stringVal)
+	} else {
+		b.WriteString(fmt.Sprintf("%q", stringVal))
+	}
 }
diff --git a/vendor/github.com/stevvooe/continuity/README.md b/vendor/github.com/stevvooe/continuity/README.md
deleted file mode 100644
index 50b64c2..0000000
--- a/vendor/github.com/stevvooe/continuity/README.md
+++ /dev/null
@@ -1,19 +0,0 @@
-# continuity
-
-[![GoDoc](https://godoc.org/github.com/containerd/continuity?status.svg)](https://godoc.org/github.com/containerd/continuity)
-[![Build Status](https://travis-ci.org/containerd/continuity.svg?branch=master)](https://travis-ci.org/containerd/continuity)
-
-A transport-agnostic, filesystem metadata manifest system
-
-This project is a staging area for experiments in providing transport agnostic
-metadata storage.
-
-Please see https://github.com/opencontainers/specs/issues/11 for more details.
-
-## Building Proto Package
-
-If you change the proto file you will need to rebuild the generated Go with `go generate`.
-
-```
-go generate ./proto
-```
diff --git a/vendor/github.com/stevvooe/continuity/sysx/copy_linux.go b/vendor/github.com/stevvooe/continuity/sysx/copy_linux.go
deleted file mode 100644
index d7ccbb2..0000000
--- a/vendor/github.com/stevvooe/continuity/sysx/copy_linux.go
+++ /dev/null
@@ -1,9 +0,0 @@
-package sysx
-
-// These functions will be generated by generate.sh
-//    $ GOOS=linux GOARCH=386 ./generate.sh copy
-//    $ GOOS=linux GOARCH=amd64 ./generate.sh copy
-//    $ GOOS=linux GOARCH=arm ./generate.sh copy
-//    $ GOOS=linux GOARCH=arm64 ./generate.sh copy
-
-//sys CopyFileRange(fdin uintptr, offin *int64, fdout uintptr, offout *int64, len int, flags int) (n int, err error)
diff --git a/vendor/github.com/stevvooe/continuity/sysx/xattr_freebsd.go b/vendor/github.com/stevvooe/continuity/sysx/xattr_freebsd.go
deleted file mode 100644
index 80dba49..0000000
--- a/vendor/github.com/stevvooe/continuity/sysx/xattr_freebsd.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package sysx
-
-import (
-	"errors"
-)
-
-// Initial stub version for FreeBSD. FreeBSD has a different
-// syscall API from Darwin and Linux for extended attributes;
-// it is also not widely used. It is not exposed at all by the
-// Go syscall package, so we need to implement directly eventually.
-
-var unsupported error = errors.New("extended attributes unsupported on FreeBSD")
-
-// Listxattr calls syscall listxattr and reads all content
-// and returns a string array
-func Listxattr(path string) ([]string, error) {
-	return []string{}, nil
-}
-
-// Removexattr calls syscall removexattr
-func Removexattr(path string, attr string) (err error) {
-	return unsupported
-}
-
-// Setxattr calls syscall setxattr
-func Setxattr(path string, attr string, data []byte, flags int) (err error) {
-	return unsupported
-}
-
-// Getxattr calls syscall getxattr
-func Getxattr(path, attr string) ([]byte, error) {
-	return []byte{}, nil
-}
-
-// LListxattr lists xattrs, not following symlinks
-func LListxattr(path string) ([]string, error) {
-	return []string{}, nil
-}
-
-// LRemovexattr removes an xattr, not following symlinks
-func LRemovexattr(path string, attr string) (err error) {
-	return unsupported
-}
-
-// LSetxattr sets an xattr, not following symlinks
-func LSetxattr(path string, attr string, data []byte, flags int) (err error) {
-	return unsupported
-}
-
-// LGetxattr gets an xattr, not following symlinks
-func LGetxattr(path, attr string) ([]byte, error) {
-	return []byte{}, nil
-}
diff --git a/vendor/github.com/tonistiigi/fifo/LICENSE b/vendor/github.com/tonistiigi/fifo/LICENSE
deleted file mode 100644
index 8d318c1..0000000
--- a/vendor/github.com/tonistiigi/fifo/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-MIT
-
-Copyright (C) 2016 Tõnis Tiigi <tonistiigi@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/tonistiigi/fifo/fifo.go b/vendor/github.com/tonistiigi/fifo/fifo.go
deleted file mode 100644
index 9e8e2ca..0000000
--- a/vendor/github.com/tonistiigi/fifo/fifo.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package fifo
-
-import (
-	"io"
-	"os"
-	"runtime"
-	"sync"
-	"syscall"
-
-	"github.com/pkg/errors"
-	"golang.org/x/net/context"
-	"golang.org/x/sys/unix"
-)
-
-type fifo struct {
-	flag        int
-	opened      chan struct{}
-	closed      chan struct{}
-	closing     chan struct{}
-	err         error
-	file        *os.File
-	closingOnce sync.Once // close has been called
-	closedOnce  sync.Once // fifo is closed
-	handle      *handle
-}
-
-var leakCheckWg *sync.WaitGroup
-
-// OpenFifo opens a fifo. Returns io.ReadWriteCloser.
-// Context can be used to cancel this function until open(2) has not returned.
-// Accepted flags:
-// - syscall.O_CREAT - create new fifo if one doesn't exist
-// - syscall.O_RDONLY - open fifo only from reader side
-// - syscall.O_WRONLY - open fifo only from writer side
-// - syscall.O_RDWR - open fifo from both sides, never block on syscall level
-// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the
-//     fifo isn't open. read/write will be connected after the actual fifo is
-//     open or after fifo is closed.
-func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error) {
-	if _, err := os.Stat(fn); err != nil {
-		if os.IsNotExist(err) && flag&syscall.O_CREAT != 0 {
-			if err := unix.Mkfifo(fn, uint32(perm&os.ModePerm)); err != nil && !os.IsExist(err) {
-				return nil, errors.Wrapf(err, "error creating fifo %v", fn)
-			}
-		} else {
-			return nil, err
-		}
-	}
-
-	block := flag&syscall.O_NONBLOCK == 0 || flag&syscall.O_RDWR != 0
-
-	flag &= ^syscall.O_CREAT
-	flag &= ^syscall.O_NONBLOCK
-
-	h, err := getHandle(fn)
-	if err != nil {
-		return nil, err
-	}
-
-	f := &fifo{
-		handle:  h,
-		flag:    flag,
-		opened:  make(chan struct{}),
-		closed:  make(chan struct{}),
-		closing: make(chan struct{}),
-	}
-
-	wg := leakCheckWg
-	if wg != nil {
-		wg.Add(2)
-	}
-
-	go func() {
-		if wg != nil {
-			defer wg.Done()
-		}
-		select {
-		case <-ctx.Done():
-			f.Close()
-		case <-f.opened:
-		case <-f.closed:
-		}
-	}()
-	go func() {
-		if wg != nil {
-			defer wg.Done()
-		}
-		var file *os.File
-		fn, err := h.Path()
-		if err == nil {
-			file, err = os.OpenFile(fn, flag, 0)
-		}
-		select {
-		case <-f.closing:
-			if err == nil {
-				select {
-				case <-ctx.Done():
-					err = ctx.Err()
-				default:
-					err = errors.Errorf("fifo %v was closed before opening", h.Name())
-				}
-				if file != nil {
-					file.Close()
-				}
-			}
-		default:
-		}
-		if err != nil {
-			f.closedOnce.Do(func() {
-				f.err = err
-				close(f.closed)
-			})
-			return
-		}
-		f.file = file
-		close(f.opened)
-	}()
-	if block {
-		select {
-		case <-f.opened:
-		case <-f.closed:
-			return nil, f.err
-		}
-	}
-	return f, nil
-}
-
-// Read from a fifo to a byte array.
-func (f *fifo) Read(b []byte) (int, error) {
-	if f.flag&syscall.O_WRONLY > 0 {
-		return 0, errors.New("reading from write-only fifo")
-	}
-	select {
-	case <-f.opened:
-		return f.file.Read(b)
-	default:
-	}
-	select {
-	case <-f.opened:
-		return f.file.Read(b)
-	case <-f.closed:
-		return 0, errors.New("reading from a closed fifo")
-	}
-}
-
-// Write from byte array to a fifo.
-func (f *fifo) Write(b []byte) (int, error) {
-	if f.flag&(syscall.O_WRONLY|syscall.O_RDWR) == 0 {
-		return 0, errors.New("writing to read-only fifo")
-	}
-	select {
-	case <-f.opened:
-		return f.file.Write(b)
-	default:
-	}
-	select {
-	case <-f.opened:
-		return f.file.Write(b)
-	case <-f.closed:
-		return 0, errors.New("writing to a closed fifo")
-	}
-}
-
-// Close the fifo. Next reads/writes will error. This method can also be used
-// before open(2) has returned and fifo was never opened.
-func (f *fifo) Close() (retErr error) {
-	for {
-		select {
-		case <-f.closed:
-			f.handle.Close()
-			return
-		default:
-			select {
-			case <-f.opened:
-				f.closedOnce.Do(func() {
-					retErr = f.file.Close()
-					f.err = retErr
-					close(f.closed)
-				})
-			default:
-				if f.flag&syscall.O_RDWR != 0 {
-					runtime.Gosched()
-					break
-				}
-				f.closingOnce.Do(func() {
-					close(f.closing)
-				})
-				reverseMode := syscall.O_WRONLY
-				if f.flag&syscall.O_WRONLY > 0 {
-					reverseMode = syscall.O_RDONLY
-				}
-				fn, err := f.handle.Path()
-				// if Close() is called concurrently(shouldn't) it may cause error
-				// because handle is closed
-				select {
-				case <-f.closed:
-				default:
-					if err != nil {
-						// Path has become invalid. We will leak a goroutine.
-						// This case should not happen in linux.
-						f.closedOnce.Do(func() {
-							f.err = err
-							close(f.closed)
-						})
-						<-f.closed
-						break
-					}
-					f, err := os.OpenFile(fn, reverseMode|syscall.O_NONBLOCK, 0)
-					if err == nil {
-						f.Close()
-					}
-					runtime.Gosched()
-				}
-			}
-		}
-	}
-}
diff --git a/vendor/github.com/tonistiigi/fifo/handle_linux.go b/vendor/github.com/tonistiigi/fifo/handle_linux.go
deleted file mode 100644
index 7bda64c..0000000
--- a/vendor/github.com/tonistiigi/fifo/handle_linux.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// +build linux
-
-package fifo
-
-import (
-	"fmt"
-	"os"
-	"sync"
-	"syscall"
-
-	"github.com/pkg/errors"
-)
-
-const O_PATH = 010000000
-
-type handle struct {
-	f         *os.File
-	dev       uint64
-	ino       uint64
-	closeOnce sync.Once
-	name      string
-}
-
-func getHandle(fn string) (*handle, error) {
-	f, err := os.OpenFile(fn, O_PATH, 0)
-	if err != nil {
-		return nil, errors.Wrapf(err, "failed to open %v with O_PATH", fn)
-	}
-
-	var stat syscall.Stat_t
-	if err := syscall.Fstat(int(f.Fd()), &stat); err != nil {
-		f.Close()
-		return nil, errors.Wrapf(err, "failed to stat handle %v", f.Fd())
-	}
-
-	h := &handle{
-		f:    f,
-		name: fn,
-		dev:  stat.Dev,
-		ino:  stat.Ino,
-	}
-
-	// check /proc just in case
-	if _, err := os.Stat(h.procPath()); err != nil {
-		f.Close()
-		return nil, errors.Wrapf(err, "couldn't stat %v", h.procPath())
-	}
-
-	return h, nil
-}
-
-func (h *handle) procPath() string {
-	return fmt.Sprintf("/proc/self/fd/%d", h.f.Fd())
-}
-
-func (h *handle) Name() string {
-	return h.name
-}
-
-func (h *handle) Path() (string, error) {
-	var stat syscall.Stat_t
-	if err := syscall.Stat(h.procPath(), &stat); err != nil {
-		return "", errors.Wrapf(err, "path %v could not be statted", h.procPath())
-	}
-	if stat.Dev != h.dev || stat.Ino != h.ino {
-		return "", errors.Errorf("failed to verify handle %v/%v %v/%v", stat.Dev, h.dev, stat.Ino, h.ino)
-	}
-	return h.procPath(), nil
-}
-
-func (h *handle) Close() error {
-	h.closeOnce.Do(func() {
-		h.f.Close()
-	})
-	return nil
-}
diff --git a/vendor/github.com/tonistiigi/fifo/handle_nolinux.go b/vendor/github.com/tonistiigi/fifo/handle_nolinux.go
deleted file mode 100644
index d9648d8..0000000
--- a/vendor/github.com/tonistiigi/fifo/handle_nolinux.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// +build !linux
-
-package fifo
-
-import (
-	"syscall"
-
-	"github.com/pkg/errors"
-)
-
-type handle struct {
-	fn  string
-	dev uint64
-	ino uint64
-}
-
-func getHandle(fn string) (*handle, error) {
-	var stat syscall.Stat_t
-	if err := syscall.Stat(fn, &stat); err != nil {
-		return nil, errors.Wrapf(err, "failed to stat %v", fn)
-	}
-
-	h := &handle{
-		fn:  fn,
-		dev: uint64(stat.Dev),
-		ino: stat.Ino,
-	}
-
-	return h, nil
-}
-
-func (h *handle) Path() (string, error) {
-	var stat syscall.Stat_t
-	if err := syscall.Stat(h.fn, &stat); err != nil {
-		return "", errors.Wrapf(err, "path %v could not be statted", h.fn)
-	}
-	if uint64(stat.Dev) != h.dev || stat.Ino != h.ino {
-		return "", errors.Errorf("failed to verify handle %v/%v %v/%v for %v", stat.Dev, h.dev, stat.Ino, h.ino, h.fn)
-	}
-	return h.fn, nil
-}
-
-func (h *handle) Name() string {
-	return h.fn
-}
-
-func (h *handle) Close() error {
-	return nil
-}
diff --git a/vendor/github.com/tonistiigi/fifo/readme.md b/vendor/github.com/tonistiigi/fifo/readme.md
deleted file mode 100644
index 1c82669..0000000
--- a/vendor/github.com/tonistiigi/fifo/readme.md
+++ /dev/null
@@ -1,30 +0,0 @@
-### fifo
-
-Go package for handling fifos in a sane way.
-
-```
-// OpenFifo opens a fifo. Returns io.ReadWriteCloser.
-// Context can be used to cancel this function until open(2) has not returned.
-// Accepted flags:
-// - syscall.O_CREAT - create new fifo if one doesn't exist
-// - syscall.O_RDONLY - open fifo only from reader side
-// - syscall.O_WRONLY - open fifo only from writer side
-// - syscall.O_RDWR - open fifo from both sides, never block on syscall level
-// - syscall.O_NONBLOCK - return io.ReadWriteCloser even if other side of the
-//     fifo isn't open. read/write will be connected after the actual fifo is
-//     open or after fifo is closed.
-func OpenFifo(ctx context.Context, fn string, flag int, perm os.FileMode) (io.ReadWriteCloser, error)
-
-
-// Read from a fifo to a byte array.
-func (f *fifo) Read(b []byte) (int, error)
-
-
-// Write from byte array to a fifo.
-func (f *fifo) Write(b []byte) (int, error)
-
-
-// Close the fifo. Next reads/writes will error. This method can also be used
-// before open(2) has returned and fifo was never opened.
-func (f *fifo) Close() error 
-```
\ No newline at end of file
diff --git a/vendor/github.com/tonistiigi/fsutil/diff.go b/vendor/github.com/tonistiigi/fsutil/diff.go
index 1530973..6125ef7 100644
--- a/vendor/github.com/tonistiigi/fsutil/diff.go
+++ b/vendor/github.com/tonistiigi/fsutil/diff.go
@@ -1,6 +1,7 @@
 package fsutil
 
 import (
+	"hash"
 	"os"
 
 	"golang.org/x/net/context"
@@ -14,6 +15,8 @@
 
 type HandleChangeFn func(ChangeKind, string, os.FileInfo, error) error
 
+type ContentHasher func(*Stat) (hash.Hash, error)
+
 func GetWalkerFn(root string) walkerFn {
 	return func(ctx context.Context, pathC chan<- *currentPath) error {
 		return Walk(ctx, root, nil, func(path string, f os.FileInfo, err error) error {
@@ -35,3 +38,7 @@
 		})
 	}
 }
+
+func emptyWalker(ctx context.Context, pathC chan<- *currentPath) error {
+	return nil
+}
diff --git a/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go b/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go
index 3c9f078..4ac7ec5 100644
--- a/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go
+++ b/vendor/github.com/tonistiigi/fsutil/diff_containerd_linux.go
@@ -4,8 +4,8 @@
 	"bytes"
 	"syscall"
 
+	"github.com/containerd/continuity/sysx"
 	"github.com/pkg/errors"
-	"github.com/stevvooe/continuity/sysx"
 )
 
 // compareSysStat returns whether the stats are equivalent,
diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter.go b/vendor/github.com/tonistiigi/fsutil/diskwriter.go
index a54b4a7..a465615 100644
--- a/vendor/github.com/tonistiigi/fsutil/diskwriter.go
+++ b/vendor/github.com/tonistiigi/fsutil/diskwriter.go
@@ -1,11 +1,6 @@
-// +build linux windows
-
 package fsutil
 
 import (
-	"archive/tar"
-	"crypto/sha256"
-	"encoding/hex"
 	"hash"
 	"io"
 	"os"
@@ -14,8 +9,7 @@
 	"sync"
 	"time"
 
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/tarsum"
+	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
 	"golang.org/x/net/context"
 	"golang.org/x/sync/errgroup"
@@ -24,11 +18,15 @@
 type WriteToFunc func(context.Context, string, io.WriteCloser) error
 
 type DiskWriterOpt struct {
-	AsyncDataCb WriteToFunc
-	SyncDataCb  WriteToFunc
-	NotifyCb    func(ChangeKind, string, os.FileInfo, error) error
+	AsyncDataCb   WriteToFunc
+	SyncDataCb    WriteToFunc
+	NotifyCb      func(ChangeKind, string, os.FileInfo, error) error
+	ContentHasher ContentHasher
+	Filter        FilterFunc
 }
 
+type FilterFunc func(*Stat) bool
+
 type DiskWriter struct {
 	opt  DiskWriterOpt
 	dest string
@@ -37,6 +35,7 @@
 	ctx    context.Context
 	cancel func()
 	eg     *errgroup.Group
+	filter FilterFunc
 }
 
 func NewDiskWriter(ctx context.Context, dest string, opt DiskWriterOpt) (*DiskWriter, error) {
@@ -102,6 +101,12 @@
 		return errors.Errorf("%s invalid change without stat information", p)
 	}
 
+	if dw.filter != nil {
+		if ok := dw.filter(stat); !ok {
+			return nil
+		}
+	}
+
 	rename := true
 	oldFi, err := os.Lstat(destPath)
 	if err != nil {
@@ -202,7 +207,7 @@
 	var hw *hashedWriter
 	if dw.opt.NotifyCb != nil {
 		var err error
-		if hw, err = newHashWriter(p, fi, w); err != nil {
+		if hw, err = newHashWriter(dw.opt.ContentHasher, fi, w); err != nil {
 			return err
 		}
 		w = hw
@@ -229,13 +234,18 @@
 type hashedWriter struct {
 	os.FileInfo
 	io.Writer
-	h   hash.Hash
-	w   io.WriteCloser
-	sum string
+	h    hash.Hash
+	w    io.WriteCloser
+	dgst digest.Digest
 }
 
-func newHashWriter(p string, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, error) {
-	h, err := NewTarsumHash(p, fi)
+func newHashWriter(ch ContentHasher, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, error) {
+	stat, ok := fi.Sys().(*Stat)
+	if !ok {
+		return nil, errors.Errorf("invalid change without stat information")
+	}
+
+	h, err := ch(stat)
 	if err != nil {
 		return nil, err
 	}
@@ -249,15 +259,15 @@
 }
 
 func (hw *hashedWriter) Close() error {
-	hw.sum = string(hex.EncodeToString(hw.h.Sum(nil)))
+	hw.dgst = digest.NewDigest(digest.SHA256, hw.h)
 	if hw.w != nil {
 		return hw.w.Close()
 	}
 	return nil
 }
 
-func (hw *hashedWriter) Hash() string {
-	return hw.sum
+func (hw *hashedWriter) Digest() digest.Digest {
+	return hw.dgst
 }
 
 type lazyFileWriter struct {
@@ -310,44 +320,3 @@
 	randmu.Unlock()
 	return strconv.Itoa(int(1e9 + r%1e9))[1:]
 }
-
-func NewTarsumHash(p string, fi os.FileInfo) (hash.Hash, error) {
-	stat, ok := fi.Sys().(*Stat)
-	link := ""
-	if ok {
-		link = stat.Linkname
-	}
-	if fi.IsDir() {
-		p += string(os.PathSeparator)
-	}
-	h, err := archive.FileInfoHeader(p, fi, link)
-	if err != nil {
-		return nil, err
-	}
-	h.Name = p
-	if ok {
-		h.Uid = int(stat.Uid)
-		h.Gid = int(stat.Gid)
-		h.Linkname = stat.Linkname
-		if stat.Xattrs != nil {
-			h.Xattrs = make(map[string]string)
-			for k, v := range stat.Xattrs {
-				h.Xattrs[k] = string(v)
-			}
-		}
-	}
-	tsh := &tarsumHash{h: h, Hash: sha256.New()}
-	tsh.Reset()
-	return tsh, nil
-}
-
-// Reset resets the Hash to its initial state.
-func (tsh *tarsumHash) Reset() {
-	tsh.Hash.Reset()
-	tarsum.WriteV1Header(tsh.h, tsh.Hash)
-}
-
-type tarsumHash struct {
-	hash.Hash
-	h *tar.Header
-}
diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_darwin.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_darwin.go
new file mode 100644
index 0000000..94d3324
--- /dev/null
+++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_darwin.go
@@ -0,0 +1,7 @@
+// +build darwin
+
+package fsutil
+
+func chtimes(path string, un int64) error {
+	return nil
+}
diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_linux.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_linux.go
index c6d97eb..74f08a1 100644
--- a/vendor/github.com/tonistiigi/fsutil/diskwriter_linux.go
+++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_linux.go
@@ -3,36 +3,10 @@
 package fsutil
 
 import (
-	"os"
-	"syscall"
-
 	"github.com/pkg/errors"
-	"github.com/stevvooe/continuity/sysx"
 	"golang.org/x/sys/unix"
 )
 
-func rewriteMetadata(p string, stat *Stat) error {
-	for key, value := range stat.Xattrs {
-		sysx.Setxattr(p, key, value, 0)
-	}
-
-	if err := os.Lchown(p, int(stat.Uid), int(stat.Gid)); err != nil {
-		return errors.Wrapf(err, "failed to lchown %s", p)
-	}
-
-	if os.FileMode(stat.Mode)&os.ModeSymlink == 0 {
-		if err := os.Chmod(p, os.FileMode(stat.Mode)); err != nil {
-			return errors.Wrapf(err, "failed to chown %s", p)
-		}
-	}
-
-	if err := chtimes(p, stat.ModTime); err != nil {
-		return errors.Wrapf(err, "failed to chtimes %s", p)
-	}
-
-	return nil
-}
-
 func chtimes(path string, un int64) error {
 	var utimes [2]unix.Timespec
 	utimes[0] = unix.NsecToTimespec(un)
@@ -44,21 +18,3 @@
 
 	return nil
 }
-
-// handleTarTypeBlockCharFifo is an OS-specific helper function used by
-// createTarFile to handle the following types of header: Block; Char; Fifo
-func handleTarTypeBlockCharFifo(path string, stat *Stat) error {
-	mode := uint32(stat.Mode & 07777)
-	if os.FileMode(stat.Mode)&os.ModeCharDevice != 0 {
-		mode |= syscall.S_IFCHR
-	} else if os.FileMode(stat.Mode)&os.ModeNamedPipe != 0 {
-		mode |= syscall.S_IFIFO
-	} else {
-		mode |= syscall.S_IFBLK
-	}
-
-	if err := syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))); err != nil {
-		return err
-	}
-	return nil
-}
diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go
new file mode 100644
index 0000000..19dffab
--- /dev/null
+++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_unix.go
@@ -0,0 +1,51 @@
+// +build !windows
+
+package fsutil
+
+import (
+	"os"
+	"syscall"
+
+	"github.com/containerd/continuity/sysx"
+	"github.com/pkg/errors"
+)
+
+func rewriteMetadata(p string, stat *Stat) error {
+	for key, value := range stat.Xattrs {
+		sysx.Setxattr(p, key, value, 0)
+	}
+
+	if err := os.Lchown(p, int(stat.Uid), int(stat.Gid)); err != nil {
+		return errors.Wrapf(err, "failed to lchown %s", p)
+	}
+
+	if os.FileMode(stat.Mode)&os.ModeSymlink == 0 {
+		if err := os.Chmod(p, os.FileMode(stat.Mode)); err != nil {
+		return errors.Wrapf(err, "failed to chmod %s", p)
+		}
+	}
+
+	if err := chtimes(p, stat.ModTime); err != nil {
+		return errors.Wrapf(err, "failed to chtimes %s", p)
+	}
+
+	return nil
+}
+
+// handleTarTypeBlockCharFifo is an OS-specific helper function used by
+// createTarFile to handle the following types of header: Block; Char; Fifo
+func handleTarTypeBlockCharFifo(path string, stat *Stat) error {
+	mode := uint32(stat.Mode & 07777)
+	if os.FileMode(stat.Mode)&os.ModeCharDevice != 0 {
+		mode |= syscall.S_IFCHR
+	} else if os.FileMode(stat.Mode)&os.ModeNamedPipe != 0 {
+		mode |= syscall.S_IFIFO
+	} else {
+		mode |= syscall.S_IFBLK
+	}
+
+	if err := syscall.Mknod(path, mode, int(mkdev(stat.Devmajor, stat.Devminor))); err != nil {
+		return err
+	}
+	return nil
+}
diff --git a/vendor/github.com/tonistiigi/fsutil/receive.go b/vendor/github.com/tonistiigi/fsutil/receive.go
index e7cee2b..233c28b 100644
--- a/vendor/github.com/tonistiigi/fsutil/receive.go
+++ b/vendor/github.com/tonistiigi/fsutil/receive.go
@@ -1,5 +1,3 @@
-// +build linux windows
-
 package fsutil
 
 import (
@@ -12,29 +10,45 @@
 	"golang.org/x/sync/errgroup"
 )
 
-func Receive(ctx context.Context, conn Stream, dest string, notifyHashed ChangeFunc) error {
+type ReceiveOpt struct {
+	NotifyHashed  ChangeFunc
+	ContentHasher ContentHasher
+	ProgressCb    func(int, bool)
+	Merge         bool
+	Filter        FilterFunc
+}
+
+func Receive(ctx context.Context, conn Stream, dest string, opt ReceiveOpt) error {
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
 
 	r := &receiver{
-		conn:         &syncStream{Stream: conn},
-		dest:         dest,
-		files:        make(map[string]uint32),
-		pipes:        make(map[uint32]io.WriteCloser),
-		notifyHashed: notifyHashed,
+		conn:          &syncStream{Stream: conn},
+		dest:          dest,
+		files:         make(map[string]uint32),
+		pipes:         make(map[uint32]io.WriteCloser),
+		notifyHashed:  opt.NotifyHashed,
+		contentHasher: opt.ContentHasher,
+		progressCb:    opt.ProgressCb,
+		merge:         opt.Merge,
+		filter:        opt.Filter,
 	}
 	return r.run(ctx)
 }
 
 type receiver struct {
-	dest    string
-	conn    Stream
-	files   map[string]uint32
-	pipes   map[uint32]io.WriteCloser
-	mu      sync.RWMutex
-	muPipes sync.RWMutex
+	dest       string
+	conn       Stream
+	files      map[string]uint32
+	pipes      map[uint32]io.WriteCloser
+	mu         sync.RWMutex
+	muPipes    sync.RWMutex
+	progressCb func(int, bool)
+	merge      bool
+	filter     FilterFunc
 
 	notifyHashed   ChangeFunc
+	contentHasher  ContentHasher
 	orderValidator Validator
 	hlValidator    Hardlinks
 }
@@ -81,8 +95,10 @@
 	g, ctx := errgroup.WithContext(ctx)
 
 	dw, err := NewDiskWriter(ctx, r.dest, DiskWriterOpt{
-		AsyncDataCb: r.asyncDataFunc,
-		NotifyCb:    r.notifyHashed,
+		AsyncDataCb:   r.asyncDataFunc,
+		NotifyCb:      r.notifyHashed,
+		ContentHasher: r.contentHasher,
+		Filter:        r.filter,
 	})
 	if err != nil {
 		return err
@@ -91,7 +107,11 @@
 	w := newDynamicWalker()
 
 	g.Go(func() error {
-		err := doubleWalkDiff(ctx, dw.HandleChange, GetWalkerFn(r.dest), w.fill)
+		destWalker := emptyWalker
+		if !r.merge {
+			destWalker = GetWalkerFn(r.dest)
+		}
+		err := doubleWalkDiff(ctx, dw.HandleChange, destWalker, w.fill)
 		if err != nil {
 			return err
 		}
@@ -105,12 +125,23 @@
 	g.Go(func() error {
 		var i uint32 = 0
 
+		size := 0
+		if r.progressCb != nil {
+			defer func() {
+				r.progressCb(size, true)
+			}()
+		}
 		var p Packet
 		for {
 			p = Packet{Data: p.Data[:0]}
 			if err := r.conn.RecvMsg(&p); err != nil {
 				return err
 			}
+			if r.progressCb != nil {
+				size += p.Size()
+				r.progressCb(size, false)
+			}
+
 			switch p.Type {
 			case PACKET_STAT:
 				if p.Stat == nil {
diff --git a/vendor/github.com/tonistiigi/fsutil/receive_unsupported.go b/vendor/github.com/tonistiigi/fsutil/receive_unsupported.go
deleted file mode 100644
index 8e83342..0000000
--- a/vendor/github.com/tonistiigi/fsutil/receive_unsupported.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build !linux,!windows
-
-package fsutil
-
-import (
-	"runtime"
-
-	"github.com/pkg/errors"
-	"golang.org/x/net/context"
-)
-
-func Receive(ctx context.Context, conn Stream, dest string, notifyHashed ChangeFunc) error {
-	return errors.Errorf("receive is unsupported in %s", runtime.GOOS)
-}
diff --git a/vendor/github.com/tonistiigi/fsutil/validator.go b/vendor/github.com/tonistiigi/fsutil/validator.go
index e4a5eba..2bd1287 100644
--- a/vendor/github.com/tonistiigi/fsutil/validator.go
+++ b/vendor/github.com/tonistiigi/fsutil/validator.go
@@ -2,7 +2,8 @@
 
 import (
 	"os"
-	"path/filepath"
+	"path"
+	"runtime"
 	"sort"
 	"strings"
 
@@ -26,14 +27,17 @@
 	if v.parentDirs == nil {
 		v.parentDirs = make([]parent, 1, 10)
 	}
-	if p != filepath.Clean(p) {
+	if runtime.GOOS == "windows" {
+		p = strings.Replace(p, "\\", "", -1)
+	}
+	if p != path.Clean(p) {
 		return errors.Errorf("invalid unclean path %s", p)
 	}
-	if filepath.IsAbs(p) {
+	if path.IsAbs(p) {
 		return errors.Errorf("abolute path %s not allowed", p)
 	}
-	dir := filepath.Dir(p)
-	base := filepath.Base(p)
+	dir := path.Dir(p)
+	base := path.Base(p)
 	if dir == "." {
 		dir = ""
 	}
@@ -51,12 +55,12 @@
 	}
 
 	if dir != v.parentDirs[len(v.parentDirs)-1].dir || v.parentDirs[i].last >= base {
-		return errors.Errorf("changes out of order: %q %q", p, filepath.Join(v.parentDirs[i].dir, v.parentDirs[i].last))
+		return errors.Errorf("changes out of order: %q %q", p, path.Join(v.parentDirs[i].dir, v.parentDirs[i].last))
 	}
 	v.parentDirs[i].last = base
 	if kind != ChangeKindDelete && fi.IsDir() {
 		v.parentDirs = append(v.parentDirs, parent{
-			dir:  filepath.Join(dir, base),
+			dir:  path.Join(dir, base),
 			last: "",
 		})
 	}
diff --git a/vendor/github.com/tonistiigi/fsutil/walker.go b/vendor/github.com/tonistiigi/fsutil/walker.go
index bfec609..db1af56 100644
--- a/vendor/github.com/tonistiigi/fsutil/walker.go
+++ b/vendor/github.com/tonistiigi/fsutil/walker.go
@@ -13,8 +13,9 @@
 )
 
 type WalkOpt struct {
-	IncludePaths    []string // todo: remove?
+	IncludePatterns []string
 	ExcludePatterns []string
+	Map             func(*Stat) bool
 }
 
 func Walk(ctx context.Context, p string, opt *WalkOpt, fn filepath.WalkFunc) error {
@@ -57,9 +58,9 @@
 		}
 
 		if opt != nil {
-			if opt.IncludePaths != nil {
+			if opt.IncludePatterns != nil {
 				matched := false
-				for _, p := range opt.IncludePaths {
+				for _, p := range opt.IncludePatterns {
 					if m, _ := filepath.Match(p, path); m {
 						matched = true
 						break
@@ -138,7 +139,12 @@
 		case <-ctx.Done():
 			return ctx.Err()
 		default:
-			if err := fn(path, &StatInfo{stat}, nil); err != nil {
+			if opt != nil && opt.Map != nil {
+				if allowed := opt.Map(stat); !allowed {
+					return nil
+				}
+			}
+			if err := fn(stat.Path, &StatInfo{stat}, nil); err != nil {
 				return err
 			}
 		}
diff --git a/vendor/github.com/tonistiigi/fsutil/walker_unix.go b/vendor/github.com/tonistiigi/fsutil/walker_unix.go
index 6f13c95..7e8ee80 100644
--- a/vendor/github.com/tonistiigi/fsutil/walker_unix.go
+++ b/vendor/github.com/tonistiigi/fsutil/walker_unix.go
@@ -6,8 +6,8 @@
 	"os"
 	"syscall"
 
+	"github.com/containerd/continuity/sysx"
 	"github.com/pkg/errors"
-	"github.com/stevvooe/continuity/sysx"
 )
 
 func loadXattr(origpath string, stat *Stat) error {
diff --git a/vendor/github.com/vbatts/tar-split/README.md b/vendor/github.com/vbatts/tar-split/README.md
index 4c544d8..03e3ec4 100644
--- a/vendor/github.com/vbatts/tar-split/README.md
+++ b/vendor/github.com/vbatts/tar-split/README.md
@@ -1,6 +1,7 @@
 # tar-split
 
 [![Build Status](https://travis-ci.org/vbatts/tar-split.svg?branch=master)](https://travis-ci.org/vbatts/tar-split)
+[![Go Report Card](https://goreportcard.com/badge/github.com/vbatts/tar-split)](https://goreportcard.com/report/github.com/vbatts/tar-split)
 
 Pristinely disassembling a tar archive, and stashing needed raw bytes and offsets to reassemble a validating original archive.
 
@@ -50,7 +51,7 @@
 contiguous file, though the archive contents may be recorded in sparse format.
 Therefore when adding the file payload to a reassembled tar, to achieve
 identical output, the file payload would need be precisely re-sparsified. This
-is not something I seek to fix imediately, but would rather have an alert that
+is not something I seek to fix immediately, but would rather have an alert that
 precise reassembly is not possible.
 (see more http://www.gnu.org/software/tar/manual/html_node/Sparse-Formats.html)
 
diff --git a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
index 54ef23a..009b3f5 100644
--- a/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
+++ b/vendor/github.com/vbatts/tar-split/tar/asm/disassemble.go
@@ -2,7 +2,6 @@
 
 import (
 	"io"
-	"io/ioutil"
 
 	"github.com/vbatts/tar-split/archive/tar"
 	"github.com/vbatts/tar-split/tar/storage"
@@ -119,20 +118,34 @@
 			}
 		}
 
-		// it is allowable, and not uncommon that there is further padding on the
-		// end of an archive, apart from the expected 1024 null bytes.
-		remainder, err := ioutil.ReadAll(outputRdr)
-		if err != nil && err != io.EOF {
-			pW.CloseWithError(err)
-			return
-		}
-		_, err = p.AddEntry(storage.Entry{
-			Type:    storage.SegmentType,
-			Payload: remainder,
-		})
-		if err != nil {
-			pW.CloseWithError(err)
-			return
+		// It is allowable, and not uncommon that there is further padding on
+		// the end of an archive, apart from the expected 1024 null bytes. We
+		// do this in chunks rather than in one go to avoid cases where a
+		// maliciously crafted tar file tries to trick us into reading many GBs
+		// into memory.
+		const paddingChunkSize = 1024 * 1024
+		var paddingChunk [paddingChunkSize]byte
+		for {
+			var isEOF bool
+			n, err := outputRdr.Read(paddingChunk[:])
+			if err != nil {
+				if err != io.EOF {
+					pW.CloseWithError(err)
+					return
+				}
+				isEOF = true
+			}
+			_, err = p.AddEntry(storage.Entry{
+				Type:    storage.SegmentType,
+				Payload: paddingChunk[:n],
+			})
+			if err != nil {
+				pW.CloseWithError(err)
+				return
+			}
+			if isEOF {
+				break
+			}
 		}
 		pW.Close()
 	}()
diff --git a/vendor/golang.org/x/crypto/README b/vendor/golang.org/x/crypto/README
deleted file mode 100644
index f1e0cbf..0000000
--- a/vendor/golang.org/x/crypto/README
+++ /dev/null
@@ -1,3 +0,0 @@
-This repository holds supplementary Go cryptography libraries.
-
-To submit changes to this repository, see http://golang.org/doc/contribute.html.
diff --git a/vendor/golang.org/x/crypto/README.md b/vendor/golang.org/x/crypto/README.md
new file mode 100644
index 0000000..c9d6fec
--- /dev/null
+++ b/vendor/golang.org/x/crypto/README.md
@@ -0,0 +1,21 @@
+# Go Cryptography
+
+This repository holds supplementary Go cryptography libraries.
+
+## Download/Install
+
+The easiest way to install is to run `go get -u golang.org/x/crypto/...`. You
+can also manually git clone the repository to `$GOPATH/src/golang.org/x/crypto`.
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+
+The main issue tracker for the crypto repository is located at
+https://github.com/golang/go/issues. Prefix your issue with "x/crypto:" in the
+subject line, so it is easy to find.
+
+Note that contributions to the cryptography package receive additional scrutiny
+due to their sensitive nature. Patches may take longer than normal to receive
+feedback.
diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.h b/vendor/golang.org/x/crypto/curve25519/const_amd64.h
new file mode 100644
index 0000000..b3f7416
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.h
@@ -0,0 +1,8 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
+
+#define REDMASK51     0x0007FFFFFFFFFFFF
diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s
new file mode 100644
index 0000000..ee7b4bd
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/const_amd64.s
@@ -0,0 +1,20 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+// These constants cannot be encoded in non-MOVQ immediates.
+// We access them directly from memory instead.
+
+DATA ·_121666_213(SB)/8, $996687872
+GLOBL ·_121666_213(SB), 8, $8
+
+DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA
+GLOBL ·_2P0(SB), 8, $8
+
+DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE
+GLOBL ·_2P1234(SB), 8, $8
diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s
new file mode 100644
index 0000000..cd793a5
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s
@@ -0,0 +1,65 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+// func cswap(inout *[4][5]uint64, v uint64)
+TEXT ·cswap(SB),7,$0
+	MOVQ inout+0(FP),DI
+	MOVQ v+8(FP),SI
+
+	SUBQ $1, SI
+	NOTQ SI
+	MOVQ SI, X15
+	PSHUFD $0x44, X15, X15
+
+	MOVOU 0(DI), X0
+	MOVOU 16(DI), X2
+	MOVOU 32(DI), X4
+	MOVOU 48(DI), X6
+	MOVOU 64(DI), X8
+	MOVOU 80(DI), X1
+	MOVOU 96(DI), X3
+	MOVOU 112(DI), X5
+	MOVOU 128(DI), X7
+	MOVOU 144(DI), X9
+
+	MOVO X1, X10
+	MOVO X3, X11
+	MOVO X5, X12
+	MOVO X7, X13
+	MOVO X9, X14
+
+	PXOR X0, X10
+	PXOR X2, X11
+	PXOR X4, X12
+	PXOR X6, X13
+	PXOR X8, X14
+	PAND X15, X10
+	PAND X15, X11
+	PAND X15, X12
+	PAND X15, X13
+	PAND X15, X14
+	PXOR X10, X0
+	PXOR X10, X1
+	PXOR X11, X2
+	PXOR X11, X3
+	PXOR X12, X4
+	PXOR X12, X5
+	PXOR X13, X6
+	PXOR X13, X7
+	PXOR X14, X8
+	PXOR X14, X9
+
+	MOVOU X0, 0(DI)
+	MOVOU X2, 16(DI)
+	MOVOU X4, 32(DI)
+	MOVOU X6, 48(DI)
+	MOVOU X8, 64(DI)
+	MOVOU X1, 80(DI)
+	MOVOU X3, 96(DI)
+	MOVOU X5, 112(DI)
+	MOVOU X7, 128(DI)
+	MOVOU X9, 144(DI)
+	RET
diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go
new file mode 100644
index 0000000..2d14c2a
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go
@@ -0,0 +1,834 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// We have a implementation in amd64 assembly so this code is only run on
+// non-amd64 platforms. The amd64 assembly does not support gccgo.
+// +build !amd64 gccgo appengine
+
+package curve25519
+
+import (
+	"encoding/binary"
+)
+
+// This code is a port of the public domain, "ref10" implementation of
+// curve25519 from SUPERCOP 20130419 by D. J. Bernstein.
+
+// fieldElement represents an element of the field GF(2^255 - 19). An element
+// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77
+// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on
+// context.
+type fieldElement [10]int32
+
+func feZero(fe *fieldElement) {
+	for i := range fe {
+		fe[i] = 0
+	}
+}
+
+func feOne(fe *fieldElement) {
+	feZero(fe)
+	fe[0] = 1
+}
+
+func feAdd(dst, a, b *fieldElement) {
+	for i := range dst {
+		dst[i] = a[i] + b[i]
+	}
+}
+
+func feSub(dst, a, b *fieldElement) {
+	for i := range dst {
+		dst[i] = a[i] - b[i]
+	}
+}
+
+func feCopy(dst, src *fieldElement) {
+	for i := range dst {
+		dst[i] = src[i]
+	}
+}
+
+// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0.
+//
+// Preconditions: b in {0,1}.
+func feCSwap(f, g *fieldElement, b int32) {
+	b = -b
+	for i := range f {
+		t := b & (f[i] ^ g[i])
+		f[i] ^= t
+		g[i] ^= t
+	}
+}
+
+// load3 reads a 24-bit, little-endian value from in.
+func load3(in []byte) int64 {
+	var r int64
+	r = int64(in[0])
+	r |= int64(in[1]) << 8
+	r |= int64(in[2]) << 16
+	return r
+}
+
+// load4 reads a 32-bit, little-endian value from in.
+func load4(in []byte) int64 {
+	return int64(binary.LittleEndian.Uint32(in))
+}
+
+func feFromBytes(dst *fieldElement, src *[32]byte) {
+	h0 := load4(src[:])
+	h1 := load3(src[4:]) << 6
+	h2 := load3(src[7:]) << 5
+	h3 := load3(src[10:]) << 3
+	h4 := load3(src[13:]) << 2
+	h5 := load4(src[16:])
+	h6 := load3(src[20:]) << 7
+	h7 := load3(src[23:]) << 5
+	h8 := load3(src[26:]) << 4
+	h9 := load3(src[29:]) << 2
+
+	var carry [10]int64
+	carry[9] = (h9 + 1<<24) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+	carry[1] = (h1 + 1<<24) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[3] = (h3 + 1<<24) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[5] = (h5 + 1<<24) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+	carry[7] = (h7 + 1<<24) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+
+	carry[0] = (h0 + 1<<25) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[2] = (h2 + 1<<25) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[4] = (h4 + 1<<25) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[6] = (h6 + 1<<25) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+	carry[8] = (h8 + 1<<25) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+
+	dst[0] = int32(h0)
+	dst[1] = int32(h1)
+	dst[2] = int32(h2)
+	dst[3] = int32(h3)
+	dst[4] = int32(h4)
+	dst[5] = int32(h5)
+	dst[6] = int32(h6)
+	dst[7] = int32(h7)
+	dst[8] = int32(h8)
+	dst[9] = int32(h9)
+}
+
+// feToBytes marshals h to s.
+// Preconditions:
+//   |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Write p=2^255-19; q=floor(h/p).
+// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))).
+//
+// Proof:
+//   Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4.
+//   Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4.
+//
+//   Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9).
+//   Then 0<y<1.
+//
+//   Write r=h-pq.
+//   Have 0<=r<=p-1=2^255-20.
+//   Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1.
+//
+//   Write x=r+19(2^-255)r+y.
+//   Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q.
+//
+//   Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1))
+//   so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q.
+func feToBytes(s *[32]byte, h *fieldElement) {
+	var carry [10]int32
+
+	q := (19*h[9] + (1 << 24)) >> 25
+	q = (h[0] + q) >> 26
+	q = (h[1] + q) >> 25
+	q = (h[2] + q) >> 26
+	q = (h[3] + q) >> 25
+	q = (h[4] + q) >> 26
+	q = (h[5] + q) >> 25
+	q = (h[6] + q) >> 26
+	q = (h[7] + q) >> 25
+	q = (h[8] + q) >> 26
+	q = (h[9] + q) >> 25
+
+	// Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20.
+	h[0] += 19 * q
+	// Goal: Output h-2^255 q, which is between 0 and 2^255-20.
+
+	carry[0] = h[0] >> 26
+	h[1] += carry[0]
+	h[0] -= carry[0] << 26
+	carry[1] = h[1] >> 25
+	h[2] += carry[1]
+	h[1] -= carry[1] << 25
+	carry[2] = h[2] >> 26
+	h[3] += carry[2]
+	h[2] -= carry[2] << 26
+	carry[3] = h[3] >> 25
+	h[4] += carry[3]
+	h[3] -= carry[3] << 25
+	carry[4] = h[4] >> 26
+	h[5] += carry[4]
+	h[4] -= carry[4] << 26
+	carry[5] = h[5] >> 25
+	h[6] += carry[5]
+	h[5] -= carry[5] << 25
+	carry[6] = h[6] >> 26
+	h[7] += carry[6]
+	h[6] -= carry[6] << 26
+	carry[7] = h[7] >> 25
+	h[8] += carry[7]
+	h[7] -= carry[7] << 25
+	carry[8] = h[8] >> 26
+	h[9] += carry[8]
+	h[8] -= carry[8] << 26
+	carry[9] = h[9] >> 25
+	h[9] -= carry[9] << 25
+	// h10 = carry9
+
+	// Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20.
+	// Have h[0]+...+2^230 h[9] between 0 and 2^255-1;
+	// evidently 2^255 h10-2^255 q = 0.
+	// Goal: Output h[0]+...+2^230 h[9].
+
+	s[0] = byte(h[0] >> 0)
+	s[1] = byte(h[0] >> 8)
+	s[2] = byte(h[0] >> 16)
+	s[3] = byte((h[0] >> 24) | (h[1] << 2))
+	s[4] = byte(h[1] >> 6)
+	s[5] = byte(h[1] >> 14)
+	s[6] = byte((h[1] >> 22) | (h[2] << 3))
+	s[7] = byte(h[2] >> 5)
+	s[8] = byte(h[2] >> 13)
+	s[9] = byte((h[2] >> 21) | (h[3] << 5))
+	s[10] = byte(h[3] >> 3)
+	s[11] = byte(h[3] >> 11)
+	s[12] = byte((h[3] >> 19) | (h[4] << 6))
+	s[13] = byte(h[4] >> 2)
+	s[14] = byte(h[4] >> 10)
+	s[15] = byte(h[4] >> 18)
+	s[16] = byte(h[5] >> 0)
+	s[17] = byte(h[5] >> 8)
+	s[18] = byte(h[5] >> 16)
+	s[19] = byte((h[5] >> 24) | (h[6] << 1))
+	s[20] = byte(h[6] >> 7)
+	s[21] = byte(h[6] >> 15)
+	s[22] = byte((h[6] >> 23) | (h[7] << 3))
+	s[23] = byte(h[7] >> 5)
+	s[24] = byte(h[7] >> 13)
+	s[25] = byte((h[7] >> 21) | (h[8] << 4))
+	s[26] = byte(h[8] >> 4)
+	s[27] = byte(h[8] >> 12)
+	s[28] = byte((h[8] >> 20) | (h[9] << 6))
+	s[29] = byte(h[9] >> 2)
+	s[30] = byte(h[9] >> 10)
+	s[31] = byte(h[9] >> 18)
+}
+
+// feMul calculates h = f * g
+// Can overlap h with f or g.
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//    |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+//
+// Notes on implementation strategy:
+//
+// Using schoolbook multiplication.
+// Karatsuba would save a little in some cost models.
+//
+// Most multiplications by 2 and 19 are 32-bit precomputations;
+// cheaper than 64-bit postcomputations.
+//
+// There is one remaining multiplication by 19 in the carry chain;
+// one *19 precomputation can be merged into this,
+// but the resulting data flow is considerably less clean.
+//
+// There are 12 carries below.
+// 10 of them are 2-way parallelizable and vectorizable.
+// Can get away with 11 carries, but then data flow is much deeper.
+//
+// With tighter constraints on inputs can squeeze carries into int32.
+func feMul(h, f, g *fieldElement) {
+	f0 := f[0]
+	f1 := f[1]
+	f2 := f[2]
+	f3 := f[3]
+	f4 := f[4]
+	f5 := f[5]
+	f6 := f[6]
+	f7 := f[7]
+	f8 := f[8]
+	f9 := f[9]
+	g0 := g[0]
+	g1 := g[1]
+	g2 := g[2]
+	g3 := g[3]
+	g4 := g[4]
+	g5 := g[5]
+	g6 := g[6]
+	g7 := g[7]
+	g8 := g[8]
+	g9 := g[9]
+	g1_19 := 19 * g1 // 1.4*2^29
+	g2_19 := 19 * g2 // 1.4*2^30; still ok
+	g3_19 := 19 * g3
+	g4_19 := 19 * g4
+	g5_19 := 19 * g5
+	g6_19 := 19 * g6
+	g7_19 := 19 * g7
+	g8_19 := 19 * g8
+	g9_19 := 19 * g9
+	f1_2 := 2 * f1
+	f3_2 := 2 * f3
+	f5_2 := 2 * f5
+	f7_2 := 2 * f7
+	f9_2 := 2 * f9
+	f0g0 := int64(f0) * int64(g0)
+	f0g1 := int64(f0) * int64(g1)
+	f0g2 := int64(f0) * int64(g2)
+	f0g3 := int64(f0) * int64(g3)
+	f0g4 := int64(f0) * int64(g4)
+	f0g5 := int64(f0) * int64(g5)
+	f0g6 := int64(f0) * int64(g6)
+	f0g7 := int64(f0) * int64(g7)
+	f0g8 := int64(f0) * int64(g8)
+	f0g9 := int64(f0) * int64(g9)
+	f1g0 := int64(f1) * int64(g0)
+	f1g1_2 := int64(f1_2) * int64(g1)
+	f1g2 := int64(f1) * int64(g2)
+	f1g3_2 := int64(f1_2) * int64(g3)
+	f1g4 := int64(f1) * int64(g4)
+	f1g5_2 := int64(f1_2) * int64(g5)
+	f1g6 := int64(f1) * int64(g6)
+	f1g7_2 := int64(f1_2) * int64(g7)
+	f1g8 := int64(f1) * int64(g8)
+	f1g9_38 := int64(f1_2) * int64(g9_19)
+	f2g0 := int64(f2) * int64(g0)
+	f2g1 := int64(f2) * int64(g1)
+	f2g2 := int64(f2) * int64(g2)
+	f2g3 := int64(f2) * int64(g3)
+	f2g4 := int64(f2) * int64(g4)
+	f2g5 := int64(f2) * int64(g5)
+	f2g6 := int64(f2) * int64(g6)
+	f2g7 := int64(f2) * int64(g7)
+	f2g8_19 := int64(f2) * int64(g8_19)
+	f2g9_19 := int64(f2) * int64(g9_19)
+	f3g0 := int64(f3) * int64(g0)
+	f3g1_2 := int64(f3_2) * int64(g1)
+	f3g2 := int64(f3) * int64(g2)
+	f3g3_2 := int64(f3_2) * int64(g3)
+	f3g4 := int64(f3) * int64(g4)
+	f3g5_2 := int64(f3_2) * int64(g5)
+	f3g6 := int64(f3) * int64(g6)
+	f3g7_38 := int64(f3_2) * int64(g7_19)
+	f3g8_19 := int64(f3) * int64(g8_19)
+	f3g9_38 := int64(f3_2) * int64(g9_19)
+	f4g0 := int64(f4) * int64(g0)
+	f4g1 := int64(f4) * int64(g1)
+	f4g2 := int64(f4) * int64(g2)
+	f4g3 := int64(f4) * int64(g3)
+	f4g4 := int64(f4) * int64(g4)
+	f4g5 := int64(f4) * int64(g5)
+	f4g6_19 := int64(f4) * int64(g6_19)
+	f4g7_19 := int64(f4) * int64(g7_19)
+	f4g8_19 := int64(f4) * int64(g8_19)
+	f4g9_19 := int64(f4) * int64(g9_19)
+	f5g0 := int64(f5) * int64(g0)
+	f5g1_2 := int64(f5_2) * int64(g1)
+	f5g2 := int64(f5) * int64(g2)
+	f5g3_2 := int64(f5_2) * int64(g3)
+	f5g4 := int64(f5) * int64(g4)
+	f5g5_38 := int64(f5_2) * int64(g5_19)
+	f5g6_19 := int64(f5) * int64(g6_19)
+	f5g7_38 := int64(f5_2) * int64(g7_19)
+	f5g8_19 := int64(f5) * int64(g8_19)
+	f5g9_38 := int64(f5_2) * int64(g9_19)
+	f6g0 := int64(f6) * int64(g0)
+	f6g1 := int64(f6) * int64(g1)
+	f6g2 := int64(f6) * int64(g2)
+	f6g3 := int64(f6) * int64(g3)
+	f6g4_19 := int64(f6) * int64(g4_19)
+	f6g5_19 := int64(f6) * int64(g5_19)
+	f6g6_19 := int64(f6) * int64(g6_19)
+	f6g7_19 := int64(f6) * int64(g7_19)
+	f6g8_19 := int64(f6) * int64(g8_19)
+	f6g9_19 := int64(f6) * int64(g9_19)
+	f7g0 := int64(f7) * int64(g0)
+	f7g1_2 := int64(f7_2) * int64(g1)
+	f7g2 := int64(f7) * int64(g2)
+	f7g3_38 := int64(f7_2) * int64(g3_19)
+	f7g4_19 := int64(f7) * int64(g4_19)
+	f7g5_38 := int64(f7_2) * int64(g5_19)
+	f7g6_19 := int64(f7) * int64(g6_19)
+	f7g7_38 := int64(f7_2) * int64(g7_19)
+	f7g8_19 := int64(f7) * int64(g8_19)
+	f7g9_38 := int64(f7_2) * int64(g9_19)
+	f8g0 := int64(f8) * int64(g0)
+	f8g1 := int64(f8) * int64(g1)
+	f8g2_19 := int64(f8) * int64(g2_19)
+	f8g3_19 := int64(f8) * int64(g3_19)
+	f8g4_19 := int64(f8) * int64(g4_19)
+	f8g5_19 := int64(f8) * int64(g5_19)
+	f8g6_19 := int64(f8) * int64(g6_19)
+	f8g7_19 := int64(f8) * int64(g7_19)
+	f8g8_19 := int64(f8) * int64(g8_19)
+	f8g9_19 := int64(f8) * int64(g9_19)
+	f9g0 := int64(f9) * int64(g0)
+	f9g1_38 := int64(f9_2) * int64(g1_19)
+	f9g2_19 := int64(f9) * int64(g2_19)
+	f9g3_38 := int64(f9_2) * int64(g3_19)
+	f9g4_19 := int64(f9) * int64(g4_19)
+	f9g5_38 := int64(f9_2) * int64(g5_19)
+	f9g6_19 := int64(f9) * int64(g6_19)
+	f9g7_38 := int64(f9_2) * int64(g7_19)
+	f9g8_19 := int64(f9) * int64(g8_19)
+	f9g9_38 := int64(f9_2) * int64(g9_19)
+	h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38
+	h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19
+	h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38
+	h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19
+	h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38
+	h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19
+	h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38
+	h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19
+	h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38
+	h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0
+	var carry [10]int64
+
+	// |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38))
+	//   i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8
+	// |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19))
+	//   i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9
+
+	carry[0] = (h0 + (1 << 25)) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[4] = (h4 + (1 << 25)) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	// |h0| <= 2^25
+	// |h4| <= 2^25
+	// |h1| <= 1.51*2^58
+	// |h5| <= 1.51*2^58
+
+	carry[1] = (h1 + (1 << 24)) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[5] = (h5 + (1 << 24)) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+	// |h1| <= 2^24; from now on fits into int32
+	// |h5| <= 2^24; from now on fits into int32
+	// |h2| <= 1.21*2^59
+	// |h6| <= 1.21*2^59
+
+	carry[2] = (h2 + (1 << 25)) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[6] = (h6 + (1 << 25)) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+	// |h2| <= 2^25; from now on fits into int32 unchanged
+	// |h6| <= 2^25; from now on fits into int32 unchanged
+	// |h3| <= 1.51*2^58
+	// |h7| <= 1.51*2^58
+
+	carry[3] = (h3 + (1 << 24)) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[7] = (h7 + (1 << 24)) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+	// |h3| <= 2^24; from now on fits into int32 unchanged
+	// |h7| <= 2^24; from now on fits into int32 unchanged
+	// |h4| <= 1.52*2^33
+	// |h8| <= 1.52*2^33
+
+	carry[4] = (h4 + (1 << 25)) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[8] = (h8 + (1 << 25)) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+	// |h4| <= 2^25; from now on fits into int32 unchanged
+	// |h8| <= 2^25; from now on fits into int32 unchanged
+	// |h5| <= 1.01*2^24
+	// |h9| <= 1.51*2^58
+
+	carry[9] = (h9 + (1 << 24)) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+	// |h9| <= 2^24; from now on fits into int32 unchanged
+	// |h0| <= 1.8*2^37
+
+	carry[0] = (h0 + (1 << 25)) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	// |h0| <= 2^25; from now on fits into int32 unchanged
+	// |h1| <= 1.01*2^24
+
+	h[0] = int32(h0)
+	h[1] = int32(h1)
+	h[2] = int32(h2)
+	h[3] = int32(h3)
+	h[4] = int32(h4)
+	h[5] = int32(h5)
+	h[6] = int32(h6)
+	h[7] = int32(h7)
+	h[8] = int32(h8)
+	h[9] = int32(h9)
+}
+
+// feSquare calculates h = f*f. Can overlap h with f.
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func feSquare(h, f *fieldElement) {
+	f0 := f[0]
+	f1 := f[1]
+	f2 := f[2]
+	f3 := f[3]
+	f4 := f[4]
+	f5 := f[5]
+	f6 := f[6]
+	f7 := f[7]
+	f8 := f[8]
+	f9 := f[9]
+	f0_2 := 2 * f0
+	f1_2 := 2 * f1
+	f2_2 := 2 * f2
+	f3_2 := 2 * f3
+	f4_2 := 2 * f4
+	f5_2 := 2 * f5
+	f6_2 := 2 * f6
+	f7_2 := 2 * f7
+	f5_38 := 38 * f5 // 1.31*2^30
+	f6_19 := 19 * f6 // 1.31*2^30
+	f7_38 := 38 * f7 // 1.31*2^30
+	f8_19 := 19 * f8 // 1.31*2^30
+	f9_38 := 38 * f9 // 1.31*2^30
+	f0f0 := int64(f0) * int64(f0)
+	f0f1_2 := int64(f0_2) * int64(f1)
+	f0f2_2 := int64(f0_2) * int64(f2)
+	f0f3_2 := int64(f0_2) * int64(f3)
+	f0f4_2 := int64(f0_2) * int64(f4)
+	f0f5_2 := int64(f0_2) * int64(f5)
+	f0f6_2 := int64(f0_2) * int64(f6)
+	f0f7_2 := int64(f0_2) * int64(f7)
+	f0f8_2 := int64(f0_2) * int64(f8)
+	f0f9_2 := int64(f0_2) * int64(f9)
+	f1f1_2 := int64(f1_2) * int64(f1)
+	f1f2_2 := int64(f1_2) * int64(f2)
+	f1f3_4 := int64(f1_2) * int64(f3_2)
+	f1f4_2 := int64(f1_2) * int64(f4)
+	f1f5_4 := int64(f1_2) * int64(f5_2)
+	f1f6_2 := int64(f1_2) * int64(f6)
+	f1f7_4 := int64(f1_2) * int64(f7_2)
+	f1f8_2 := int64(f1_2) * int64(f8)
+	f1f9_76 := int64(f1_2) * int64(f9_38)
+	f2f2 := int64(f2) * int64(f2)
+	f2f3_2 := int64(f2_2) * int64(f3)
+	f2f4_2 := int64(f2_2) * int64(f4)
+	f2f5_2 := int64(f2_2) * int64(f5)
+	f2f6_2 := int64(f2_2) * int64(f6)
+	f2f7_2 := int64(f2_2) * int64(f7)
+	f2f8_38 := int64(f2_2) * int64(f8_19)
+	f2f9_38 := int64(f2) * int64(f9_38)
+	f3f3_2 := int64(f3_2) * int64(f3)
+	f3f4_2 := int64(f3_2) * int64(f4)
+	f3f5_4 := int64(f3_2) * int64(f5_2)
+	f3f6_2 := int64(f3_2) * int64(f6)
+	f3f7_76 := int64(f3_2) * int64(f7_38)
+	f3f8_38 := int64(f3_2) * int64(f8_19)
+	f3f9_76 := int64(f3_2) * int64(f9_38)
+	f4f4 := int64(f4) * int64(f4)
+	f4f5_2 := int64(f4_2) * int64(f5)
+	f4f6_38 := int64(f4_2) * int64(f6_19)
+	f4f7_38 := int64(f4) * int64(f7_38)
+	f4f8_38 := int64(f4_2) * int64(f8_19)
+	f4f9_38 := int64(f4) * int64(f9_38)
+	f5f5_38 := int64(f5) * int64(f5_38)
+	f5f6_38 := int64(f5_2) * int64(f6_19)
+	f5f7_76 := int64(f5_2) * int64(f7_38)
+	f5f8_38 := int64(f5_2) * int64(f8_19)
+	f5f9_76 := int64(f5_2) * int64(f9_38)
+	f6f6_19 := int64(f6) * int64(f6_19)
+	f6f7_38 := int64(f6) * int64(f7_38)
+	f6f8_38 := int64(f6_2) * int64(f8_19)
+	f6f9_38 := int64(f6) * int64(f9_38)
+	f7f7_38 := int64(f7) * int64(f7_38)
+	f7f8_38 := int64(f7_2) * int64(f8_19)
+	f7f9_76 := int64(f7_2) * int64(f9_38)
+	f8f8_19 := int64(f8) * int64(f8_19)
+	f8f9_38 := int64(f8) * int64(f9_38)
+	f9f9_38 := int64(f9) * int64(f9_38)
+	h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38
+	h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38
+	h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19
+	h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38
+	h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38
+	h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38
+	h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19
+	h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38
+	h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38
+	h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2
+	var carry [10]int64
+
+	carry[0] = (h0 + (1 << 25)) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[4] = (h4 + (1 << 25)) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+
+	carry[1] = (h1 + (1 << 24)) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[5] = (h5 + (1 << 24)) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+
+	carry[2] = (h2 + (1 << 25)) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[6] = (h6 + (1 << 25)) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+
+	carry[3] = (h3 + (1 << 24)) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[7] = (h7 + (1 << 24)) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+
+	carry[4] = (h4 + (1 << 25)) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[8] = (h8 + (1 << 25)) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+
+	carry[9] = (h9 + (1 << 24)) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+
+	carry[0] = (h0 + (1 << 25)) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+
+	h[0] = int32(h0)
+	h[1] = int32(h1)
+	h[2] = int32(h2)
+	h[3] = int32(h3)
+	h[4] = int32(h4)
+	h[5] = int32(h5)
+	h[6] = int32(h6)
+	h[7] = int32(h7)
+	h[8] = int32(h8)
+	h[9] = int32(h9)
+}
+
+// feMul121666 calculates h = f * 121666. Can overlap h with f.
+//
+// Preconditions:
+//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
+//
+// Postconditions:
+//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
+func feMul121666(h, f *fieldElement) {
+	h0 := int64(f[0]) * 121666
+	h1 := int64(f[1]) * 121666
+	h2 := int64(f[2]) * 121666
+	h3 := int64(f[3]) * 121666
+	h4 := int64(f[4]) * 121666
+	h5 := int64(f[5]) * 121666
+	h6 := int64(f[6]) * 121666
+	h7 := int64(f[7]) * 121666
+	h8 := int64(f[8]) * 121666
+	h9 := int64(f[9]) * 121666
+	var carry [10]int64
+
+	carry[9] = (h9 + (1 << 24)) >> 25
+	h0 += carry[9] * 19
+	h9 -= carry[9] << 25
+	carry[1] = (h1 + (1 << 24)) >> 25
+	h2 += carry[1]
+	h1 -= carry[1] << 25
+	carry[3] = (h3 + (1 << 24)) >> 25
+	h4 += carry[3]
+	h3 -= carry[3] << 25
+	carry[5] = (h5 + (1 << 24)) >> 25
+	h6 += carry[5]
+	h5 -= carry[5] << 25
+	carry[7] = (h7 + (1 << 24)) >> 25
+	h8 += carry[7]
+	h7 -= carry[7] << 25
+
+	carry[0] = (h0 + (1 << 25)) >> 26
+	h1 += carry[0]
+	h0 -= carry[0] << 26
+	carry[2] = (h2 + (1 << 25)) >> 26
+	h3 += carry[2]
+	h2 -= carry[2] << 26
+	carry[4] = (h4 + (1 << 25)) >> 26
+	h5 += carry[4]
+	h4 -= carry[4] << 26
+	carry[6] = (h6 + (1 << 25)) >> 26
+	h7 += carry[6]
+	h6 -= carry[6] << 26
+	carry[8] = (h8 + (1 << 25)) >> 26
+	h9 += carry[8]
+	h8 -= carry[8] << 26
+
+	h[0] = int32(h0)
+	h[1] = int32(h1)
+	h[2] = int32(h2)
+	h[3] = int32(h3)
+	h[4] = int32(h4)
+	h[5] = int32(h5)
+	h[6] = int32(h6)
+	h[7] = int32(h7)
+	h[8] = int32(h8)
+	h[9] = int32(h9)
+}
+
+// feInvert sets out = z^-1.
+func feInvert(out, z *fieldElement) {
+	var t0, t1, t2, t3 fieldElement
+	var i int
+
+	feSquare(&t0, z)
+	for i = 1; i < 1; i++ {
+		feSquare(&t0, &t0)
+	}
+	feSquare(&t1, &t0)
+	for i = 1; i < 2; i++ {
+		feSquare(&t1, &t1)
+	}
+	feMul(&t1, z, &t1)
+	feMul(&t0, &t0, &t1)
+	feSquare(&t2, &t0)
+	for i = 1; i < 1; i++ {
+		feSquare(&t2, &t2)
+	}
+	feMul(&t1, &t1, &t2)
+	feSquare(&t2, &t1)
+	for i = 1; i < 5; i++ {
+		feSquare(&t2, &t2)
+	}
+	feMul(&t1, &t2, &t1)
+	feSquare(&t2, &t1)
+	for i = 1; i < 10; i++ {
+		feSquare(&t2, &t2)
+	}
+	feMul(&t2, &t2, &t1)
+	feSquare(&t3, &t2)
+	for i = 1; i < 20; i++ {
+		feSquare(&t3, &t3)
+	}
+	feMul(&t2, &t3, &t2)
+	feSquare(&t2, &t2)
+	for i = 1; i < 10; i++ {
+		feSquare(&t2, &t2)
+	}
+	feMul(&t1, &t2, &t1)
+	feSquare(&t2, &t1)
+	for i = 1; i < 50; i++ {
+		feSquare(&t2, &t2)
+	}
+	feMul(&t2, &t2, &t1)
+	feSquare(&t3, &t2)
+	for i = 1; i < 100; i++ {
+		feSquare(&t3, &t3)
+	}
+	feMul(&t2, &t3, &t2)
+	feSquare(&t2, &t2)
+	for i = 1; i < 50; i++ {
+		feSquare(&t2, &t2)
+	}
+	feMul(&t1, &t2, &t1)
+	feSquare(&t1, &t1)
+	for i = 1; i < 5; i++ {
+		feSquare(&t1, &t1)
+	}
+	feMul(out, &t1, &t0)
+}
+
+func scalarMult(out, in, base *[32]byte) {
+	var e [32]byte
+
+	copy(e[:], in[:])
+	e[0] &= 248
+	e[31] &= 127
+	e[31] |= 64
+
+	var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
+	feFromBytes(&x1, base)
+	feOne(&x2)
+	feCopy(&x3, &x1)
+	feOne(&z3)
+
+	swap := int32(0)
+	for pos := 254; pos >= 0; pos-- {
+		b := e[pos/8] >> uint(pos&7)
+		b &= 1
+		swap ^= int32(b)
+		feCSwap(&x2, &x3, swap)
+		feCSwap(&z2, &z3, swap)
+		swap = int32(b)
+
+		feSub(&tmp0, &x3, &z3)
+		feSub(&tmp1, &x2, &z2)
+		feAdd(&x2, &x2, &z2)
+		feAdd(&z2, &x3, &z3)
+		feMul(&z3, &tmp0, &x2)
+		feMul(&z2, &z2, &tmp1)
+		feSquare(&tmp0, &tmp1)
+		feSquare(&tmp1, &x2)
+		feAdd(&x3, &z3, &z2)
+		feSub(&z2, &z3, &z2)
+		feMul(&x2, &tmp1, &tmp0)
+		feSub(&tmp1, &tmp1, &tmp0)
+		feSquare(&z2, &z2)
+		feMul121666(&z3, &tmp1)
+		feSquare(&x3, &x3)
+		feAdd(&tmp0, &tmp0, &z3)
+		feMul(&z3, &x1, &z2)
+		feMul(&z2, &tmp1, &tmp0)
+	}
+
+	feCSwap(&x2, &x3, swap)
+	feCSwap(&z2, &z3, swap)
+
+	feInvert(&z2, &z2)
+	feMul(&x2, &x2, &z2)
+	feToBytes(out, &x2)
+}
diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go
new file mode 100644
index 0000000..da9b10d
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/doc.go
@@ -0,0 +1,23 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package curve25519 provides an implementation of scalar multiplication on
+// the elliptic curve known as curve25519. See https://cr.yp.to/ecdh.html
+package curve25519 // import "golang.org/x/crypto/curve25519"
+
+// basePoint is the x coordinate of the generator of the curve.
+var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+
+// ScalarMult sets dst to the product in*base where dst and base are the x
+// coordinates of group points and all values are in little-endian form.
+func ScalarMult(dst, in, base *[32]byte) {
+	scalarMult(dst, in, base)
+}
+
+// ScalarBaseMult sets dst to the product in*base where dst and base are the x
+// coordinates of group points, base is the standard generator and all values
+// are in little-endian form.
+func ScalarBaseMult(dst, in *[32]byte) {
+	ScalarMult(dst, in, &basePoint)
+}
diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
new file mode 100644
index 0000000..3908161
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
@@ -0,0 +1,73 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+#include "const_amd64.h"
+
+// func freeze(inout *[5]uint64)
+TEXT ·freeze(SB),7,$0-8
+	MOVQ inout+0(FP), DI
+
+	MOVQ 0(DI),SI
+	MOVQ 8(DI),DX
+	MOVQ 16(DI),CX
+	MOVQ 24(DI),R8
+	MOVQ 32(DI),R9
+	MOVQ $REDMASK51,AX
+	MOVQ AX,R10
+	SUBQ $18,R10
+	MOVQ $3,R11
+REDUCELOOP:
+	MOVQ SI,R12
+	SHRQ $51,R12
+	ANDQ AX,SI
+	ADDQ R12,DX
+	MOVQ DX,R12
+	SHRQ $51,R12
+	ANDQ AX,DX
+	ADDQ R12,CX
+	MOVQ CX,R12
+	SHRQ $51,R12
+	ANDQ AX,CX
+	ADDQ R12,R8
+	MOVQ R8,R12
+	SHRQ $51,R12
+	ANDQ AX,R8
+	ADDQ R12,R9
+	MOVQ R9,R12
+	SHRQ $51,R12
+	ANDQ AX,R9
+	IMUL3Q $19,R12,R12
+	ADDQ R12,SI
+	SUBQ $1,R11
+	JA REDUCELOOP
+	MOVQ $1,R12
+	CMPQ R10,SI
+	CMOVQLT R11,R12
+	CMPQ AX,DX
+	CMOVQNE R11,R12
+	CMPQ AX,CX
+	CMOVQNE R11,R12
+	CMPQ AX,R8
+	CMOVQNE R11,R12
+	CMPQ AX,R9
+	CMOVQNE R11,R12
+	NEGQ R12
+	ANDQ R12,AX
+	ANDQ R12,R10
+	SUBQ R10,SI
+	SUBQ AX,DX
+	SUBQ AX,CX
+	SUBQ AX,R8
+	SUBQ AX,R9
+	MOVQ SI,0(DI)
+	MOVQ DX,8(DI)
+	MOVQ CX,16(DI)
+	MOVQ R8,24(DI)
+	MOVQ R9,32(DI)
+	RET
diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
new file mode 100644
index 0000000..9e9040b
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
@@ -0,0 +1,1377 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+#include "const_amd64.h"
+
+// func ladderstep(inout *[5][5]uint64)
+TEXT ·ladderstep(SB),0,$296-8
+	MOVQ inout+0(FP),DI
+
+	MOVQ 40(DI),SI
+	MOVQ 48(DI),DX
+	MOVQ 56(DI),CX
+	MOVQ 64(DI),R8
+	MOVQ 72(DI),R9
+	MOVQ SI,AX
+	MOVQ DX,R10
+	MOVQ CX,R11
+	MOVQ R8,R12
+	MOVQ R9,R13
+	ADDQ ·_2P0(SB),AX
+	ADDQ ·_2P1234(SB),R10
+	ADDQ ·_2P1234(SB),R11
+	ADDQ ·_2P1234(SB),R12
+	ADDQ ·_2P1234(SB),R13
+	ADDQ 80(DI),SI
+	ADDQ 88(DI),DX
+	ADDQ 96(DI),CX
+	ADDQ 104(DI),R8
+	ADDQ 112(DI),R9
+	SUBQ 80(DI),AX
+	SUBQ 88(DI),R10
+	SUBQ 96(DI),R11
+	SUBQ 104(DI),R12
+	SUBQ 112(DI),R13
+	MOVQ SI,0(SP)
+	MOVQ DX,8(SP)
+	MOVQ CX,16(SP)
+	MOVQ R8,24(SP)
+	MOVQ R9,32(SP)
+	MOVQ AX,40(SP)
+	MOVQ R10,48(SP)
+	MOVQ R11,56(SP)
+	MOVQ R12,64(SP)
+	MOVQ R13,72(SP)
+	MOVQ 40(SP),AX
+	MULQ 40(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 40(SP),AX
+	SHLQ $1,AX
+	MULQ 48(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 40(SP),AX
+	SHLQ $1,AX
+	MULQ 56(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 40(SP),AX
+	SHLQ $1,AX
+	MULQ 64(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 40(SP),AX
+	SHLQ $1,AX
+	MULQ 72(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 48(SP),AX
+	MULQ 48(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 48(SP),AX
+	SHLQ $1,AX
+	MULQ 56(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 48(SP),AX
+	SHLQ $1,AX
+	MULQ 64(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 48(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 56(SP),AX
+	MULQ 56(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 56(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 64(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 56(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 64(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 64(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 64(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 72(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	ANDQ DX,SI
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ADDQ R10,CX
+	ANDQ DX,R8
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ADDQ R12,CX
+	ANDQ DX,R9
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ADDQ R14,CX
+	ANDQ DX,AX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,80(SP)
+	MOVQ R8,88(SP)
+	MOVQ R9,96(SP)
+	MOVQ AX,104(SP)
+	MOVQ R10,112(SP)
+	MOVQ 0(SP),AX
+	MULQ 0(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 0(SP),AX
+	SHLQ $1,AX
+	MULQ 8(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 0(SP),AX
+	SHLQ $1,AX
+	MULQ 16(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 0(SP),AX
+	SHLQ $1,AX
+	MULQ 24(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 0(SP),AX
+	SHLQ $1,AX
+	MULQ 32(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 8(SP),AX
+	MULQ 8(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SP),AX
+	SHLQ $1,AX
+	MULQ 16(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 8(SP),AX
+	SHLQ $1,AX
+	MULQ 24(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 8(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 16(SP),AX
+	MULQ 16(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 16(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 24(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 16(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 24(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 24(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 24(SP),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 32(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	ANDQ DX,SI
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ADDQ R10,CX
+	ANDQ DX,R8
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ADDQ R12,CX
+	ANDQ DX,R9
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ADDQ R14,CX
+	ANDQ DX,AX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,120(SP)
+	MOVQ R8,128(SP)
+	MOVQ R9,136(SP)
+	MOVQ AX,144(SP)
+	MOVQ R10,152(SP)
+	MOVQ SI,SI
+	MOVQ R8,DX
+	MOVQ R9,CX
+	MOVQ AX,R8
+	MOVQ R10,R9
+	ADDQ ·_2P0(SB),SI
+	ADDQ ·_2P1234(SB),DX
+	ADDQ ·_2P1234(SB),CX
+	ADDQ ·_2P1234(SB),R8
+	ADDQ ·_2P1234(SB),R9
+	SUBQ 80(SP),SI
+	SUBQ 88(SP),DX
+	SUBQ 96(SP),CX
+	SUBQ 104(SP),R8
+	SUBQ 112(SP),R9
+	MOVQ SI,160(SP)
+	MOVQ DX,168(SP)
+	MOVQ CX,176(SP)
+	MOVQ R8,184(SP)
+	MOVQ R9,192(SP)
+	MOVQ 120(DI),SI
+	MOVQ 128(DI),DX
+	MOVQ 136(DI),CX
+	MOVQ 144(DI),R8
+	MOVQ 152(DI),R9
+	MOVQ SI,AX
+	MOVQ DX,R10
+	MOVQ CX,R11
+	MOVQ R8,R12
+	MOVQ R9,R13
+	ADDQ ·_2P0(SB),AX
+	ADDQ ·_2P1234(SB),R10
+	ADDQ ·_2P1234(SB),R11
+	ADDQ ·_2P1234(SB),R12
+	ADDQ ·_2P1234(SB),R13
+	ADDQ 160(DI),SI
+	ADDQ 168(DI),DX
+	ADDQ 176(DI),CX
+	ADDQ 184(DI),R8
+	ADDQ 192(DI),R9
+	SUBQ 160(DI),AX
+	SUBQ 168(DI),R10
+	SUBQ 176(DI),R11
+	SUBQ 184(DI),R12
+	SUBQ 192(DI),R13
+	MOVQ SI,200(SP)
+	MOVQ DX,208(SP)
+	MOVQ CX,216(SP)
+	MOVQ R8,224(SP)
+	MOVQ R9,232(SP)
+	MOVQ AX,240(SP)
+	MOVQ R10,248(SP)
+	MOVQ R11,256(SP)
+	MOVQ R12,264(SP)
+	MOVQ R13,272(SP)
+	MOVQ 224(SP),SI
+	IMUL3Q $19,SI,AX
+	MOVQ AX,280(SP)
+	MULQ 56(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 232(SP),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,288(SP)
+	MULQ 48(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 200(SP),AX
+	MULQ 40(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 200(SP),AX
+	MULQ 48(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 200(SP),AX
+	MULQ 56(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 200(SP),AX
+	MULQ 64(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 200(SP),AX
+	MULQ 72(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 208(SP),AX
+	MULQ 40(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 208(SP),AX
+	MULQ 48(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 208(SP),AX
+	MULQ 56(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 208(SP),AX
+	MULQ 64(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 208(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 216(SP),AX
+	MULQ 40(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 216(SP),AX
+	MULQ 48(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 216(SP),AX
+	MULQ 56(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 216(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 64(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 216(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 72(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 224(SP),AX
+	MULQ 40(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 224(SP),AX
+	MULQ 48(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 280(SP),AX
+	MULQ 64(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 280(SP),AX
+	MULQ 72(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 232(SP),AX
+	MULQ 40(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 288(SP),AX
+	MULQ 56(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 288(SP),AX
+	MULQ 64(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 288(SP),AX
+	MULQ 72(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ANDQ DX,SI
+	ADDQ R10,CX
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ANDQ DX,R8
+	ADDQ R12,CX
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ANDQ DX,R9
+	ADDQ R14,CX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	ANDQ DX,AX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,40(SP)
+	MOVQ R8,48(SP)
+	MOVQ R9,56(SP)
+	MOVQ AX,64(SP)
+	MOVQ R10,72(SP)
+	MOVQ 264(SP),SI
+	IMUL3Q $19,SI,AX
+	MOVQ AX,200(SP)
+	MULQ 16(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 272(SP),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,208(SP)
+	MULQ 8(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 240(SP),AX
+	MULQ 0(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 240(SP),AX
+	MULQ 8(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 240(SP),AX
+	MULQ 16(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 240(SP),AX
+	MULQ 24(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 240(SP),AX
+	MULQ 32(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 248(SP),AX
+	MULQ 0(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 248(SP),AX
+	MULQ 8(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 248(SP),AX
+	MULQ 16(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 248(SP),AX
+	MULQ 24(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 248(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 256(SP),AX
+	MULQ 0(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 256(SP),AX
+	MULQ 8(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 256(SP),AX
+	MULQ 16(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 256(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 24(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 256(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 264(SP),AX
+	MULQ 0(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 264(SP),AX
+	MULQ 8(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 200(SP),AX
+	MULQ 24(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 200(SP),AX
+	MULQ 32(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 272(SP),AX
+	MULQ 0(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 208(SP),AX
+	MULQ 16(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 208(SP),AX
+	MULQ 24(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 208(SP),AX
+	MULQ 32(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ANDQ DX,SI
+	ADDQ R10,CX
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ANDQ DX,R8
+	ADDQ R12,CX
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ANDQ DX,R9
+	ADDQ R14,CX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	ANDQ DX,AX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,DX
+	MOVQ R8,CX
+	MOVQ R9,R11
+	MOVQ AX,R12
+	MOVQ R10,R13
+	ADDQ ·_2P0(SB),DX
+	ADDQ ·_2P1234(SB),CX
+	ADDQ ·_2P1234(SB),R11
+	ADDQ ·_2P1234(SB),R12
+	ADDQ ·_2P1234(SB),R13
+	ADDQ 40(SP),SI
+	ADDQ 48(SP),R8
+	ADDQ 56(SP),R9
+	ADDQ 64(SP),AX
+	ADDQ 72(SP),R10
+	SUBQ 40(SP),DX
+	SUBQ 48(SP),CX
+	SUBQ 56(SP),R11
+	SUBQ 64(SP),R12
+	SUBQ 72(SP),R13
+	MOVQ SI,120(DI)
+	MOVQ R8,128(DI)
+	MOVQ R9,136(DI)
+	MOVQ AX,144(DI)
+	MOVQ R10,152(DI)
+	MOVQ DX,160(DI)
+	MOVQ CX,168(DI)
+	MOVQ R11,176(DI)
+	MOVQ R12,184(DI)
+	MOVQ R13,192(DI)
+	MOVQ 120(DI),AX
+	MULQ 120(DI)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 120(DI),AX
+	SHLQ $1,AX
+	MULQ 128(DI)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 120(DI),AX
+	SHLQ $1,AX
+	MULQ 136(DI)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 120(DI),AX
+	SHLQ $1,AX
+	MULQ 144(DI)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 120(DI),AX
+	SHLQ $1,AX
+	MULQ 152(DI)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 128(DI),AX
+	MULQ 128(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 128(DI),AX
+	SHLQ $1,AX
+	MULQ 136(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 128(DI),AX
+	SHLQ $1,AX
+	MULQ 144(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 128(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 152(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 136(DI),AX
+	MULQ 136(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 136(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 144(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 136(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 152(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 144(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 144(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 144(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 152(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 152(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 152(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	ANDQ DX,SI
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ADDQ R10,CX
+	ANDQ DX,R8
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ADDQ R12,CX
+	ANDQ DX,R9
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ADDQ R14,CX
+	ANDQ DX,AX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,120(DI)
+	MOVQ R8,128(DI)
+	MOVQ R9,136(DI)
+	MOVQ AX,144(DI)
+	MOVQ R10,152(DI)
+	MOVQ 160(DI),AX
+	MULQ 160(DI)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 160(DI),AX
+	SHLQ $1,AX
+	MULQ 168(DI)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 160(DI),AX
+	SHLQ $1,AX
+	MULQ 176(DI)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 160(DI),AX
+	SHLQ $1,AX
+	MULQ 184(DI)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 160(DI),AX
+	SHLQ $1,AX
+	MULQ 192(DI)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 168(DI),AX
+	MULQ 168(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 168(DI),AX
+	SHLQ $1,AX
+	MULQ 176(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 168(DI),AX
+	SHLQ $1,AX
+	MULQ 184(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 168(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 192(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 176(DI),AX
+	MULQ 176(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 176(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 184(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 176(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 192(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 184(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 184(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 184(DI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 192(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 192(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 192(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	ANDQ DX,SI
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ADDQ R10,CX
+	ANDQ DX,R8
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ADDQ R12,CX
+	ANDQ DX,R9
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ADDQ R14,CX
+	ANDQ DX,AX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,160(DI)
+	MOVQ R8,168(DI)
+	MOVQ R9,176(DI)
+	MOVQ AX,184(DI)
+	MOVQ R10,192(DI)
+	MOVQ 184(DI),SI
+	IMUL3Q $19,SI,AX
+	MOVQ AX,0(SP)
+	MULQ 16(DI)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 192(DI),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,8(SP)
+	MULQ 8(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 160(DI),AX
+	MULQ 0(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 160(DI),AX
+	MULQ 8(DI)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 160(DI),AX
+	MULQ 16(DI)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 160(DI),AX
+	MULQ 24(DI)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 160(DI),AX
+	MULQ 32(DI)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 168(DI),AX
+	MULQ 0(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 168(DI),AX
+	MULQ 8(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 168(DI),AX
+	MULQ 16(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 168(DI),AX
+	MULQ 24(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 168(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 176(DI),AX
+	MULQ 0(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 176(DI),AX
+	MULQ 8(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 176(DI),AX
+	MULQ 16(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 176(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 24(DI)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 176(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 184(DI),AX
+	MULQ 0(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 184(DI),AX
+	MULQ 8(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 0(SP),AX
+	MULQ 24(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 0(SP),AX
+	MULQ 32(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 192(DI),AX
+	MULQ 0(DI)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 8(SP),AX
+	MULQ 16(DI)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 8(SP),AX
+	MULQ 24(DI)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SP),AX
+	MULQ 32(DI)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ANDQ DX,SI
+	ADDQ R10,CX
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ANDQ DX,R8
+	ADDQ R12,CX
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ANDQ DX,R9
+	ADDQ R14,CX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	ANDQ DX,AX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,160(DI)
+	MOVQ R8,168(DI)
+	MOVQ R9,176(DI)
+	MOVQ AX,184(DI)
+	MOVQ R10,192(DI)
+	MOVQ 144(SP),SI
+	IMUL3Q $19,SI,AX
+	MOVQ AX,0(SP)
+	MULQ 96(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 152(SP),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,8(SP)
+	MULQ 88(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 120(SP),AX
+	MULQ 80(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 120(SP),AX
+	MULQ 88(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 120(SP),AX
+	MULQ 96(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 120(SP),AX
+	MULQ 104(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 120(SP),AX
+	MULQ 112(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 128(SP),AX
+	MULQ 80(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 128(SP),AX
+	MULQ 88(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 128(SP),AX
+	MULQ 96(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 128(SP),AX
+	MULQ 104(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 128(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 112(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 136(SP),AX
+	MULQ 80(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 136(SP),AX
+	MULQ 88(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 136(SP),AX
+	MULQ 96(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 136(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 104(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 136(SP),DX
+	IMUL3Q $19,DX,AX
+	MULQ 112(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 144(SP),AX
+	MULQ 80(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 144(SP),AX
+	MULQ 88(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 0(SP),AX
+	MULQ 104(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 0(SP),AX
+	MULQ 112(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 152(SP),AX
+	MULQ 80(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 8(SP),AX
+	MULQ 96(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 8(SP),AX
+	MULQ 104(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SP),AX
+	MULQ 112(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ANDQ DX,SI
+	ADDQ R10,CX
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ANDQ DX,R8
+	ADDQ R12,CX
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ANDQ DX,R9
+	ADDQ R14,CX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	ANDQ DX,AX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,40(DI)
+	MOVQ R8,48(DI)
+	MOVQ R9,56(DI)
+	MOVQ AX,64(DI)
+	MOVQ R10,72(DI)
+	MOVQ 160(SP),AX
+	MULQ ·_121666_213(SB)
+	SHRQ $13,AX
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 168(SP),AX
+	MULQ ·_121666_213(SB)
+	SHRQ $13,AX
+	ADDQ AX,CX
+	MOVQ DX,R8
+	MOVQ 176(SP),AX
+	MULQ ·_121666_213(SB)
+	SHRQ $13,AX
+	ADDQ AX,R8
+	MOVQ DX,R9
+	MOVQ 184(SP),AX
+	MULQ ·_121666_213(SB)
+	SHRQ $13,AX
+	ADDQ AX,R9
+	MOVQ DX,R10
+	MOVQ 192(SP),AX
+	MULQ ·_121666_213(SB)
+	SHRQ $13,AX
+	ADDQ AX,R10
+	IMUL3Q $19,DX,DX
+	ADDQ DX,SI
+	ADDQ 80(SP),SI
+	ADDQ 88(SP),CX
+	ADDQ 96(SP),R8
+	ADDQ 104(SP),R9
+	ADDQ 112(SP),R10
+	MOVQ SI,80(DI)
+	MOVQ CX,88(DI)
+	MOVQ R8,96(DI)
+	MOVQ R9,104(DI)
+	MOVQ R10,112(DI)
+	MOVQ 104(DI),SI
+	IMUL3Q $19,SI,AX
+	MOVQ AX,0(SP)
+	MULQ 176(SP)
+	MOVQ AX,SI
+	MOVQ DX,CX
+	MOVQ 112(DI),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,8(SP)
+	MULQ 168(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 80(DI),AX
+	MULQ 160(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 80(DI),AX
+	MULQ 168(SP)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 80(DI),AX
+	MULQ 176(SP)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 80(DI),AX
+	MULQ 184(SP)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 80(DI),AX
+	MULQ 192(SP)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 88(DI),AX
+	MULQ 160(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 88(DI),AX
+	MULQ 168(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 88(DI),AX
+	MULQ 176(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 88(DI),AX
+	MULQ 184(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 88(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 192(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 96(DI),AX
+	MULQ 160(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 96(DI),AX
+	MULQ 168(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 96(DI),AX
+	MULQ 176(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 96(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 184(SP)
+	ADDQ AX,SI
+	ADCQ DX,CX
+	MOVQ 96(DI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 192(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 104(DI),AX
+	MULQ 160(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 104(DI),AX
+	MULQ 168(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 0(SP),AX
+	MULQ 184(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 0(SP),AX
+	MULQ 192(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 112(DI),AX
+	MULQ 160(SP)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 8(SP),AX
+	MULQ 176(SP)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 8(SP),AX
+	MULQ 184(SP)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SP),AX
+	MULQ 192(SP)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ $REDMASK51,DX
+	SHLQ $13,CX:SI
+	ANDQ DX,SI
+	SHLQ $13,R9:R8
+	ANDQ DX,R8
+	ADDQ CX,R8
+	SHLQ $13,R11:R10
+	ANDQ DX,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ DX,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ DX,R14
+	ADDQ R13,R14
+	IMUL3Q $19,R15,CX
+	ADDQ CX,SI
+	MOVQ SI,CX
+	SHRQ $51,CX
+	ADDQ R8,CX
+	MOVQ CX,R8
+	SHRQ $51,CX
+	ANDQ DX,SI
+	ADDQ R10,CX
+	MOVQ CX,R9
+	SHRQ $51,CX
+	ANDQ DX,R8
+	ADDQ R12,CX
+	MOVQ CX,AX
+	SHRQ $51,CX
+	ANDQ DX,R9
+	ADDQ R14,CX
+	MOVQ CX,R10
+	SHRQ $51,CX
+	ANDQ DX,AX
+	IMUL3Q $19,CX,CX
+	ADDQ CX,SI
+	ANDQ DX,R10
+	MOVQ SI,80(DI)
+	MOVQ R8,88(DI)
+	MOVQ R9,96(DI)
+	MOVQ AX,104(DI)
+	MOVQ R10,112(DI)
+	RET
diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
new file mode 100644
index 0000000..5822bd5
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
@@ -0,0 +1,240 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+package curve25519
+
+// These functions are implemented in the .s files. The names of the functions
+// in the rest of the file are also taken from the SUPERCOP sources to help
+// people following along.
+
+//go:noescape
+
+func cswap(inout *[5]uint64, v uint64)
+
+//go:noescape
+
+func ladderstep(inout *[5][5]uint64)
+
+//go:noescape
+
+func freeze(inout *[5]uint64)
+
+//go:noescape
+
+func mul(dest, a, b *[5]uint64)
+
+//go:noescape
+
+func square(out, in *[5]uint64)
+
+// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
+func mladder(xr, zr *[5]uint64, s *[32]byte) {
+	var work [5][5]uint64
+
+	work[0] = *xr
+	setint(&work[1], 1)
+	setint(&work[2], 0)
+	work[3] = *xr
+	setint(&work[4], 1)
+
+	j := uint(6)
+	var prevbit byte
+
+	for i := 31; i >= 0; i-- {
+		for j < 8 {
+			bit := ((*s)[i] >> j) & 1
+			swap := bit ^ prevbit
+			prevbit = bit
+			cswap(&work[1], uint64(swap))
+			ladderstep(&work)
+			j--
+		}
+		j = 7
+	}
+
+	*xr = work[1]
+	*zr = work[2]
+}
+
+func scalarMult(out, in, base *[32]byte) {
+	var e [32]byte
+	copy(e[:], (*in)[:])
+	e[0] &= 248
+	e[31] &= 127
+	e[31] |= 64
+
+	var t, z [5]uint64
+	unpack(&t, base)
+	mladder(&t, &z, &e)
+	invert(&z, &z)
+	mul(&t, &t, &z)
+	pack(out, &t)
+}
+
+func setint(r *[5]uint64, v uint64) {
+	r[0] = v
+	r[1] = 0
+	r[2] = 0
+	r[3] = 0
+	r[4] = 0
+}
+
+// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
+// order.
+func unpack(r *[5]uint64, x *[32]byte) {
+	r[0] = uint64(x[0]) |
+		uint64(x[1])<<8 |
+		uint64(x[2])<<16 |
+		uint64(x[3])<<24 |
+		uint64(x[4])<<32 |
+		uint64(x[5])<<40 |
+		uint64(x[6]&7)<<48
+
+	r[1] = uint64(x[6])>>3 |
+		uint64(x[7])<<5 |
+		uint64(x[8])<<13 |
+		uint64(x[9])<<21 |
+		uint64(x[10])<<29 |
+		uint64(x[11])<<37 |
+		uint64(x[12]&63)<<45
+
+	r[2] = uint64(x[12])>>6 |
+		uint64(x[13])<<2 |
+		uint64(x[14])<<10 |
+		uint64(x[15])<<18 |
+		uint64(x[16])<<26 |
+		uint64(x[17])<<34 |
+		uint64(x[18])<<42 |
+		uint64(x[19]&1)<<50
+
+	r[3] = uint64(x[19])>>1 |
+		uint64(x[20])<<7 |
+		uint64(x[21])<<15 |
+		uint64(x[22])<<23 |
+		uint64(x[23])<<31 |
+		uint64(x[24])<<39 |
+		uint64(x[25]&15)<<47
+
+	r[4] = uint64(x[25])>>4 |
+		uint64(x[26])<<4 |
+		uint64(x[27])<<12 |
+		uint64(x[28])<<20 |
+		uint64(x[29])<<28 |
+		uint64(x[30])<<36 |
+		uint64(x[31]&127)<<44
+}
+
+// pack sets out = x where out is the usual, little-endian form of the 5,
+// 51-bit limbs in x.
+func pack(out *[32]byte, x *[5]uint64) {
+	t := *x
+	freeze(&t)
+
+	out[0] = byte(t[0])
+	out[1] = byte(t[0] >> 8)
+	out[2] = byte(t[0] >> 16)
+	out[3] = byte(t[0] >> 24)
+	out[4] = byte(t[0] >> 32)
+	out[5] = byte(t[0] >> 40)
+	out[6] = byte(t[0] >> 48)
+
+	out[6] ^= byte(t[1]<<3) & 0xf8
+	out[7] = byte(t[1] >> 5)
+	out[8] = byte(t[1] >> 13)
+	out[9] = byte(t[1] >> 21)
+	out[10] = byte(t[1] >> 29)
+	out[11] = byte(t[1] >> 37)
+	out[12] = byte(t[1] >> 45)
+
+	out[12] ^= byte(t[2]<<6) & 0xc0
+	out[13] = byte(t[2] >> 2)
+	out[14] = byte(t[2] >> 10)
+	out[15] = byte(t[2] >> 18)
+	out[16] = byte(t[2] >> 26)
+	out[17] = byte(t[2] >> 34)
+	out[18] = byte(t[2] >> 42)
+	out[19] = byte(t[2] >> 50)
+
+	out[19] ^= byte(t[3]<<1) & 0xfe
+	out[20] = byte(t[3] >> 7)
+	out[21] = byte(t[3] >> 15)
+	out[22] = byte(t[3] >> 23)
+	out[23] = byte(t[3] >> 31)
+	out[24] = byte(t[3] >> 39)
+	out[25] = byte(t[3] >> 47)
+
+	out[25] ^= byte(t[4]<<4) & 0xf0
+	out[26] = byte(t[4] >> 4)
+	out[27] = byte(t[4] >> 12)
+	out[28] = byte(t[4] >> 20)
+	out[29] = byte(t[4] >> 28)
+	out[30] = byte(t[4] >> 36)
+	out[31] = byte(t[4] >> 44)
+}
+
+// invert calculates r = x^-1 mod p using Fermat's little theorem.
+func invert(r *[5]uint64, x *[5]uint64) {
+	var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
+
+	square(&z2, x)        /* 2 */
+	square(&t, &z2)       /* 4 */
+	square(&t, &t)        /* 8 */
+	mul(&z9, &t, x)       /* 9 */
+	mul(&z11, &z9, &z2)   /* 11 */
+	square(&t, &z11)      /* 22 */
+	mul(&z2_5_0, &t, &z9) /* 2^5 - 2^0 = 31 */
+
+	square(&t, &z2_5_0)      /* 2^6 - 2^1 */
+	for i := 1; i < 5; i++ { /* 2^20 - 2^10 */
+		square(&t, &t)
+	}
+	mul(&z2_10_0, &t, &z2_5_0) /* 2^10 - 2^0 */
+
+	square(&t, &z2_10_0)      /* 2^11 - 2^1 */
+	for i := 1; i < 10; i++ { /* 2^20 - 2^10 */
+		square(&t, &t)
+	}
+	mul(&z2_20_0, &t, &z2_10_0) /* 2^20 - 2^0 */
+
+	square(&t, &z2_20_0)      /* 2^21 - 2^1 */
+	for i := 1; i < 20; i++ { /* 2^40 - 2^20 */
+		square(&t, &t)
+	}
+	mul(&t, &t, &z2_20_0) /* 2^40 - 2^0 */
+
+	square(&t, &t)            /* 2^41 - 2^1 */
+	for i := 1; i < 10; i++ { /* 2^50 - 2^10 */
+		square(&t, &t)
+	}
+	mul(&z2_50_0, &t, &z2_10_0) /* 2^50 - 2^0 */
+
+	square(&t, &z2_50_0)      /* 2^51 - 2^1 */
+	for i := 1; i < 50; i++ { /* 2^100 - 2^50 */
+		square(&t, &t)
+	}
+	mul(&z2_100_0, &t, &z2_50_0) /* 2^100 - 2^0 */
+
+	square(&t, &z2_100_0)      /* 2^101 - 2^1 */
+	for i := 1; i < 100; i++ { /* 2^200 - 2^100 */
+		square(&t, &t)
+	}
+	mul(&t, &t, &z2_100_0) /* 2^200 - 2^0 */
+
+	square(&t, &t)            /* 2^201 - 2^1 */
+	for i := 1; i < 50; i++ { /* 2^250 - 2^50 */
+		square(&t, &t)
+	}
+	mul(&t, &t, &z2_50_0) /* 2^250 - 2^0 */
+
+	square(&t, &t) /* 2^251 - 2^1 */
+	square(&t, &t) /* 2^252 - 2^2 */
+	square(&t, &t) /* 2^253 - 2^3 */
+
+	square(&t, &t) /* 2^254 - 2^4 */
+
+	square(&t, &t)   /* 2^255 - 2^5 */
+	mul(r, &t, &z11) /* 2^255 - 21 */
+}
diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
new file mode 100644
index 0000000..5ce80a2
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
@@ -0,0 +1,169 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+#include "const_amd64.h"
+
+// func mul(dest, a, b *[5]uint64)
+TEXT ·mul(SB),0,$16-24
+	MOVQ dest+0(FP), DI
+	MOVQ a+8(FP), SI
+	MOVQ b+16(FP), DX
+
+	MOVQ DX,CX
+	MOVQ 24(SI),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,0(SP)
+	MULQ 16(CX)
+	MOVQ AX,R8
+	MOVQ DX,R9
+	MOVQ 32(SI),DX
+	IMUL3Q $19,DX,AX
+	MOVQ AX,8(SP)
+	MULQ 8(CX)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 0(SI),AX
+	MULQ 0(CX)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 0(SI),AX
+	MULQ 8(CX)
+	MOVQ AX,R10
+	MOVQ DX,R11
+	MOVQ 0(SI),AX
+	MULQ 16(CX)
+	MOVQ AX,R12
+	MOVQ DX,R13
+	MOVQ 0(SI),AX
+	MULQ 24(CX)
+	MOVQ AX,R14
+	MOVQ DX,R15
+	MOVQ 0(SI),AX
+	MULQ 32(CX)
+	MOVQ AX,BX
+	MOVQ DX,BP
+	MOVQ 8(SI),AX
+	MULQ 0(CX)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SI),AX
+	MULQ 8(CX)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 8(SI),AX
+	MULQ 16(CX)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 8(SI),AX
+	MULQ 24(CX)
+	ADDQ AX,BX
+	ADCQ DX,BP
+	MOVQ 8(SI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(CX)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 16(SI),AX
+	MULQ 0(CX)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 16(SI),AX
+	MULQ 8(CX)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 16(SI),AX
+	MULQ 16(CX)
+	ADDQ AX,BX
+	ADCQ DX,BP
+	MOVQ 16(SI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 24(CX)
+	ADDQ AX,R8
+	ADCQ DX,R9
+	MOVQ 16(SI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(CX)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 24(SI),AX
+	MULQ 0(CX)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ 24(SI),AX
+	MULQ 8(CX)
+	ADDQ AX,BX
+	ADCQ DX,BP
+	MOVQ 0(SP),AX
+	MULQ 24(CX)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 0(SP),AX
+	MULQ 32(CX)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 32(SI),AX
+	MULQ 0(CX)
+	ADDQ AX,BX
+	ADCQ DX,BP
+	MOVQ 8(SP),AX
+	MULQ 16(CX)
+	ADDQ AX,R10
+	ADCQ DX,R11
+	MOVQ 8(SP),AX
+	MULQ 24(CX)
+	ADDQ AX,R12
+	ADCQ DX,R13
+	MOVQ 8(SP),AX
+	MULQ 32(CX)
+	ADDQ AX,R14
+	ADCQ DX,R15
+	MOVQ $REDMASK51,SI
+	SHLQ $13,R9:R8
+	ANDQ SI,R8
+	SHLQ $13,R11:R10
+	ANDQ SI,R10
+	ADDQ R9,R10
+	SHLQ $13,R13:R12
+	ANDQ SI,R12
+	ADDQ R11,R12
+	SHLQ $13,R15:R14
+	ANDQ SI,R14
+	ADDQ R13,R14
+	SHLQ $13,BP:BX
+	ANDQ SI,BX
+	ADDQ R15,BX
+	IMUL3Q $19,BP,DX
+	ADDQ DX,R8
+	MOVQ R8,DX
+	SHRQ $51,DX
+	ADDQ R10,DX
+	MOVQ DX,CX
+	SHRQ $51,DX
+	ANDQ SI,R8
+	ADDQ R12,DX
+	MOVQ DX,R9
+	SHRQ $51,DX
+	ANDQ SI,CX
+	ADDQ R14,DX
+	MOVQ DX,AX
+	SHRQ $51,DX
+	ANDQ SI,R9
+	ADDQ BX,DX
+	MOVQ DX,R10
+	SHRQ $51,DX
+	ANDQ SI,AX
+	IMUL3Q $19,DX,DX
+	ADDQ DX,R8
+	ANDQ SI,R10
+	MOVQ R8,0(DI)
+	MOVQ CX,8(DI)
+	MOVQ R9,16(DI)
+	MOVQ AX,24(DI)
+	MOVQ R10,32(DI)
+	RET
diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s
new file mode 100644
index 0000000..12f7373
--- /dev/null
+++ b/vendor/golang.org/x/crypto/curve25519/square_amd64.s
@@ -0,0 +1,132 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This code was translated into a form compatible with 6a from the public
+// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
+
+// +build amd64,!gccgo,!appengine
+
+#include "const_amd64.h"
+
+// func square(out, in *[5]uint64)
+TEXT ·square(SB),7,$0-16
+	MOVQ out+0(FP), DI
+	MOVQ in+8(FP), SI
+
+	MOVQ 0(SI),AX
+	MULQ 0(SI)
+	MOVQ AX,CX
+	MOVQ DX,R8
+	MOVQ 0(SI),AX
+	SHLQ $1,AX
+	MULQ 8(SI)
+	MOVQ AX,R9
+	MOVQ DX,R10
+	MOVQ 0(SI),AX
+	SHLQ $1,AX
+	MULQ 16(SI)
+	MOVQ AX,R11
+	MOVQ DX,R12
+	MOVQ 0(SI),AX
+	SHLQ $1,AX
+	MULQ 24(SI)
+	MOVQ AX,R13
+	MOVQ DX,R14
+	MOVQ 0(SI),AX
+	SHLQ $1,AX
+	MULQ 32(SI)
+	MOVQ AX,R15
+	MOVQ DX,BX
+	MOVQ 8(SI),AX
+	MULQ 8(SI)
+	ADDQ AX,R11
+	ADCQ DX,R12
+	MOVQ 8(SI),AX
+	SHLQ $1,AX
+	MULQ 16(SI)
+	ADDQ AX,R13
+	ADCQ DX,R14
+	MOVQ 8(SI),AX
+	SHLQ $1,AX
+	MULQ 24(SI)
+	ADDQ AX,R15
+	ADCQ DX,BX
+	MOVQ 8(SI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SI)
+	ADDQ AX,CX
+	ADCQ DX,R8
+	MOVQ 16(SI),AX
+	MULQ 16(SI)
+	ADDQ AX,R15
+	ADCQ DX,BX
+	MOVQ 16(SI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 24(SI)
+	ADDQ AX,CX
+	ADCQ DX,R8
+	MOVQ 16(SI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SI)
+	ADDQ AX,R9
+	ADCQ DX,R10
+	MOVQ 24(SI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 24(SI)
+	ADDQ AX,R9
+	ADCQ DX,R10
+	MOVQ 24(SI),DX
+	IMUL3Q $38,DX,AX
+	MULQ 32(SI)
+	ADDQ AX,R11
+	ADCQ DX,R12
+	MOVQ 32(SI),DX
+	IMUL3Q $19,DX,AX
+	MULQ 32(SI)
+	ADDQ AX,R13
+	ADCQ DX,R14
+	MOVQ $REDMASK51,SI
+	SHLQ $13,R8:CX
+	ANDQ SI,CX
+	SHLQ $13,R10:R9
+	ANDQ SI,R9
+	ADDQ R8,R9
+	SHLQ $13,R12:R11
+	ANDQ SI,R11
+	ADDQ R10,R11
+	SHLQ $13,R14:R13
+	ANDQ SI,R13
+	ADDQ R12,R13
+	SHLQ $13,BX:R15
+	ANDQ SI,R15
+	ADDQ R14,R15
+	IMUL3Q $19,BX,DX
+	ADDQ DX,CX
+	MOVQ CX,DX
+	SHRQ $51,DX
+	ADDQ R9,DX
+	ANDQ SI,CX
+	MOVQ DX,R8
+	SHRQ $51,DX
+	ADDQ R11,DX
+	ANDQ SI,R8
+	MOVQ DX,R9
+	SHRQ $51,DX
+	ADDQ R13,DX
+	ANDQ SI,R9
+	MOVQ DX,AX
+	SHRQ $51,DX
+	ADDQ R15,DX
+	ANDQ SI,AX
+	MOVQ DX,R10
+	SHRQ $51,DX
+	IMUL3Q $19,DX,DX
+	ADDQ DX,CX
+	ANDQ SI,R10
+	MOVQ CX,0(DI)
+	MOVQ R8,8(DI)
+	MOVQ R9,16(DI)
+	MOVQ AX,24(DI)
+	MOVQ R10,32(DI)
+	RET
diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
index dbf31bb..1e1dff5 100644
--- a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
+++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
@@ -13,7 +13,7 @@
 message, etc. Nonces are long enough that randomly generated nonces have
 negligible risk of collision.
 
-This package is interoperable with NaCl: http://nacl.cr.yp.to/secretbox.html.
+This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
 */
 package secretbox // import "golang.org/x/crypto/nacl/secretbox"
 
diff --git a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go
index 284d2a6..233b8b6 100644
--- a/vendor/golang.org/x/crypto/pkcs12/bmp-string.go
+++ b/vendor/golang.org/x/crypto/pkcs12/bmp-string.go
@@ -13,7 +13,7 @@
 func bmpString(s string) ([]byte, error) {
 	// References:
 	// https://tools.ietf.org/html/rfc7292#appendix-B.1
-	// http://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
+	// https://en.wikipedia.org/wiki/Plane_(Unicode)#Basic_Multilingual_Plane
 	//  - non-BMP characters are encoded in UTF 16 by using a surrogate pair of 16-bit codes
 	//	  EncodeRune returns 0xfffd if the rune does not need special encoding
 	//  - the above RFC provides the info that BMPStrings are NULL terminated.
diff --git a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go
index ad6341e..eff9ad3 100644
--- a/vendor/golang.org/x/crypto/pkcs12/pkcs12.go
+++ b/vendor/golang.org/x/crypto/pkcs12/pkcs12.go
@@ -109,6 +109,10 @@
 
 	bags, encodedPassword, err := getSafeContents(pfxData, encodedPassword)
 
+	if err != nil {
+		return nil, err
+	}
+
 	blocks := make([]*pem.Block, 0, len(bags))
 	for _, bag := range bags {
 		block, err := convertBag(&bag, encodedPassword)
diff --git a/vendor/golang.org/x/crypto/poly1305/const_amd64.s b/vendor/golang.org/x/crypto/poly1305/const_amd64.s
deleted file mode 100644
index 8e861f3..0000000
--- a/vendor/golang.org/x/crypto/poly1305/const_amd64.s
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-DATA ·SCALE(SB)/8, $0x37F4000000000000
-GLOBL ·SCALE(SB), 8, $8
-DATA ·TWO32(SB)/8, $0x41F0000000000000
-GLOBL ·TWO32(SB), 8, $8
-DATA ·TWO64(SB)/8, $0x43F0000000000000
-GLOBL ·TWO64(SB), 8, $8
-DATA ·TWO96(SB)/8, $0x45F0000000000000
-GLOBL ·TWO96(SB), 8, $8
-DATA ·ALPHA32(SB)/8, $0x45E8000000000000
-GLOBL ·ALPHA32(SB), 8, $8
-DATA ·ALPHA64(SB)/8, $0x47E8000000000000
-GLOBL ·ALPHA64(SB), 8, $8
-DATA ·ALPHA96(SB)/8, $0x49E8000000000000
-GLOBL ·ALPHA96(SB), 8, $8
-DATA ·ALPHA130(SB)/8, $0x4C08000000000000
-GLOBL ·ALPHA130(SB), 8, $8
-DATA ·DOFFSET0(SB)/8, $0x4330000000000000
-GLOBL ·DOFFSET0(SB), 8, $8
-DATA ·DOFFSET1(SB)/8, $0x4530000000000000
-GLOBL ·DOFFSET1(SB), 8, $8
-DATA ·DOFFSET2(SB)/8, $0x4730000000000000
-GLOBL ·DOFFSET2(SB), 8, $8
-DATA ·DOFFSET3(SB)/8, $0x4930000000000000
-GLOBL ·DOFFSET3(SB), 8, $8
-DATA ·DOFFSET3MINUSTWO128(SB)/8, $0x492FFFFE00000000
-GLOBL ·DOFFSET3MINUSTWO128(SB), 8, $8
-DATA ·HOFFSET0(SB)/8, $0x43300001FFFFFFFB
-GLOBL ·HOFFSET0(SB), 8, $8
-DATA ·HOFFSET1(SB)/8, $0x45300001FFFFFFFE
-GLOBL ·HOFFSET1(SB), 8, $8
-DATA ·HOFFSET2(SB)/8, $0x47300001FFFFFFFE
-GLOBL ·HOFFSET2(SB), 8, $8
-DATA ·HOFFSET3(SB)/8, $0x49300003FFFFFFFE
-GLOBL ·HOFFSET3(SB), 8, $8
-DATA ·ROUNDING(SB)/2, $0x137f
-GLOBL ·ROUNDING(SB), 8, $2
diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go
index 4a5f826..f562fa5 100644
--- a/vendor/golang.org/x/crypto/poly1305/poly1305.go
+++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go
@@ -3,7 +3,8 @@
 // license that can be found in the LICENSE file.
 
 /*
-Package poly1305 implements Poly1305 one-time message authentication code as specified in http://cr.yp.to/mac/poly1305-20050329.pdf.
+Package poly1305 implements Poly1305 one-time message authentication code as
+specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
 
 Poly1305 is a fast, one-time authentication function. It is infeasible for an
 attacker to generate an authenticator for a message without the key. However, a
diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305_amd64.s b/vendor/golang.org/x/crypto/poly1305/poly1305_amd64.s
deleted file mode 100644
index f8d4ee9..0000000
--- a/vendor/golang.org/x/crypto/poly1305/poly1305_amd64.s
+++ /dev/null
@@ -1,497 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]key)
-TEXT ·poly1305(SB),0,$224-32
-	MOVQ out+0(FP),DI
-	MOVQ m+8(FP),SI
-	MOVQ mlen+16(FP),DX
-	MOVQ key+24(FP),CX
-
-	MOVQ SP,R11
-	MOVQ $31,R9
-	NOTQ R9
-	ANDQ R9,SP
-	ADDQ $32,SP
-
-	MOVQ R11,32(SP)
-	MOVQ R12,40(SP)
-	MOVQ R13,48(SP)
-	MOVQ R14,56(SP)
-	MOVQ R15,64(SP)
-	MOVQ BX,72(SP)
-	MOVQ BP,80(SP)
-	FLDCW ·ROUNDING(SB)
-	MOVL 0(CX),R8
-	MOVL 4(CX),R9
-	MOVL 8(CX),AX
-	MOVL 12(CX),R10
-	MOVQ DI,88(SP)
-	MOVQ CX,96(SP)
-	MOVL $0X43300000,108(SP)
-	MOVL $0X45300000,116(SP)
-	MOVL $0X47300000,124(SP)
-	MOVL $0X49300000,132(SP)
-	ANDL $0X0FFFFFFF,R8
-	ANDL $0X0FFFFFFC,R9
-	ANDL $0X0FFFFFFC,AX
-	ANDL $0X0FFFFFFC,R10
-	MOVL R8,104(SP)
-	MOVL R9,112(SP)
-	MOVL AX,120(SP)
-	MOVL R10,128(SP)
-	FMOVD 104(SP), F0
-	FSUBD ·DOFFSET0(SB), F0
-	FMOVD 112(SP), F0
-	FSUBD ·DOFFSET1(SB), F0
-	FMOVD 120(SP), F0
-	FSUBD ·DOFFSET2(SB), F0
-	FMOVD 128(SP), F0
-	FSUBD ·DOFFSET3(SB), F0
-	FXCHD F0, F3
-	FMOVDP F0, 136(SP)
-	FXCHD F0, F1
-	FMOVD F0, 144(SP)
-	FMULD ·SCALE(SB), F0
-	FMOVDP F0, 152(SP)
-	FMOVD F0, 160(SP)
-	FMULD ·SCALE(SB), F0
-	FMOVDP F0, 168(SP)
-	FMOVD F0, 176(SP)
-	FMULD ·SCALE(SB), F0
-	FMOVDP F0, 184(SP)
-	FLDZ
-	FLDZ
-	FLDZ
-	FLDZ
-	CMPQ DX,$16
-	JB ADDATMOST15BYTES
-	INITIALATLEAST16BYTES:
-	MOVL 12(SI),DI
-	MOVL 8(SI),CX
-	MOVL 4(SI),R8
-	MOVL 0(SI),R9
-	MOVL DI,128(SP)
-	MOVL CX,120(SP)
-	MOVL R8,112(SP)
-	MOVL R9,104(SP)
-	ADDQ $16,SI
-	SUBQ $16,DX
-	FXCHD F0, F3
-	FADDD 128(SP), F0
-	FSUBD ·DOFFSET3MINUSTWO128(SB), F0
-	FXCHD F0, F1
-	FADDD 112(SP), F0
-	FSUBD ·DOFFSET1(SB), F0
-	FXCHD F0, F2
-	FADDD 120(SP), F0
-	FSUBD ·DOFFSET2(SB), F0
-	FXCHD F0, F3
-	FADDD 104(SP), F0
-	FSUBD ·DOFFSET0(SB), F0
-	CMPQ DX,$16
-	JB MULTIPLYADDATMOST15BYTES
-	MULTIPLYADDATLEAST16BYTES:
-	MOVL 12(SI),DI
-	MOVL 8(SI),CX
-	MOVL 4(SI),R8
-	MOVL 0(SI),R9
-	MOVL DI,128(SP)
-	MOVL CX,120(SP)
-	MOVL R8,112(SP)
-	MOVL R9,104(SP)
-	ADDQ $16,SI
-	SUBQ $16,DX
-	FMOVD ·ALPHA130(SB), F0
-	FADDD F2,F0
-	FSUBD ·ALPHA130(SB), F0
-	FSUBD F0,F2
-	FMULD ·SCALE(SB), F0
-	FMOVD ·ALPHA32(SB), F0
-	FADDD F2,F0
-	FSUBD ·ALPHA32(SB), F0
-	FSUBD F0,F2
-	FXCHD F0, F2
-	FADDDP F0,F1
-	FMOVD ·ALPHA64(SB), F0
-	FADDD F4,F0
-	FSUBD ·ALPHA64(SB), F0
-	FSUBD F0,F4
-	FMOVD ·ALPHA96(SB), F0
-	FADDD F6,F0
-	FSUBD ·ALPHA96(SB), F0
-	FSUBD F0,F6
-	FXCHD F0, F6
-	FADDDP F0,F1
-	FXCHD F0, F3
-	FADDDP F0,F5
-	FXCHD F0, F3
-	FADDDP F0,F1
-	FMOVD 176(SP), F0
-	FMULD F3,F0
-	FMOVD 160(SP), F0
-	FMULD F4,F0
-	FMOVD 144(SP), F0
-	FMULD F5,F0
-	FMOVD 136(SP), F0
-	FMULDP F0,F6
-	FMOVD 160(SP), F0
-	FMULD F4,F0
-	FADDDP F0,F3
-	FMOVD 144(SP), F0
-	FMULD F4,F0
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F4,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULDP F0,F4
-	FXCHD F0, F3
-	FADDDP F0,F5
-	FMOVD 144(SP), F0
-	FMULD F4,F0
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F4,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULD F4,F0
-	FADDDP F0,F3
-	FMOVD 168(SP), F0
-	FMULDP F0,F4
-	FXCHD F0, F3
-	FADDDP F0,F4
-	FMOVD 136(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F1
-	FXCHD F0, F3
-	FMOVD 184(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F3
-	FXCHD F0, F1
-	FMOVD 168(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F1
-	FMOVD 152(SP), F0
-	FMULDP F0,F5
-	FXCHD F0, F4
-	FADDDP F0,F1
-	CMPQ DX,$16
-	FXCHD F0, F2
-	FMOVD 128(SP), F0
-	FSUBD ·DOFFSET3MINUSTWO128(SB), F0
-	FADDDP F0,F1
-	FXCHD F0, F1
-	FMOVD 120(SP), F0
-	FSUBD ·DOFFSET2(SB), F0
-	FADDDP F0,F1
-	FXCHD F0, F3
-	FMOVD 112(SP), F0
-	FSUBD ·DOFFSET1(SB), F0
-	FADDDP F0,F1
-	FXCHD F0, F2
-	FMOVD 104(SP), F0
-	FSUBD ·DOFFSET0(SB), F0
-	FADDDP F0,F1
-	JAE MULTIPLYADDATLEAST16BYTES
-	MULTIPLYADDATMOST15BYTES:
-	FMOVD ·ALPHA130(SB), F0
-	FADDD F2,F0
-	FSUBD ·ALPHA130(SB), F0
-	FSUBD F0,F2
-	FMULD ·SCALE(SB), F0
-	FMOVD ·ALPHA32(SB), F0
-	FADDD F2,F0
-	FSUBD ·ALPHA32(SB), F0
-	FSUBD F0,F2
-	FMOVD ·ALPHA64(SB), F0
-	FADDD F5,F0
-	FSUBD ·ALPHA64(SB), F0
-	FSUBD F0,F5
-	FMOVD ·ALPHA96(SB), F0
-	FADDD F7,F0
-	FSUBD ·ALPHA96(SB), F0
-	FSUBD F0,F7
-	FXCHD F0, F7
-	FADDDP F0,F1
-	FXCHD F0, F5
-	FADDDP F0,F1
-	FXCHD F0, F3
-	FADDDP F0,F5
-	FADDDP F0,F1
-	FMOVD 176(SP), F0
-	FMULD F1,F0
-	FMOVD 160(SP), F0
-	FMULD F2,F0
-	FMOVD 144(SP), F0
-	FMULD F3,F0
-	FMOVD 136(SP), F0
-	FMULDP F0,F4
-	FMOVD 160(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F3
-	FMOVD 144(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULDP F0,F5
-	FXCHD F0, F4
-	FADDDP F0,F3
-	FMOVD 144(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F4
-	FMOVD 168(SP), F0
-	FMULDP F0,F5
-	FXCHD F0, F4
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F4
-	FMOVD 168(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F3
-	FMOVD 152(SP), F0
-	FMULDP F0,F5
-	FXCHD F0, F4
-	FADDDP F0,F1
-	ADDATMOST15BYTES:
-	CMPQ DX,$0
-	JE NOMOREBYTES
-	MOVL $0,0(SP)
-	MOVL $0, 4 (SP)
-	MOVL $0, 8 (SP)
-	MOVL $0, 12 (SP)
-	LEAQ 0(SP),DI
-	MOVQ DX,CX
-	REP; MOVSB
-	MOVB $1,0(DI)
-	MOVL  12 (SP),DI
-	MOVL  8 (SP),SI
-	MOVL  4 (SP),DX
-	MOVL 0(SP),CX
-	MOVL DI,128(SP)
-	MOVL SI,120(SP)
-	MOVL DX,112(SP)
-	MOVL CX,104(SP)
-	FXCHD F0, F3
-	FADDD 128(SP), F0
-	FSUBD ·DOFFSET3(SB), F0
-	FXCHD F0, F2
-	FADDD 120(SP), F0
-	FSUBD ·DOFFSET2(SB), F0
-	FXCHD F0, F1
-	FADDD 112(SP), F0
-	FSUBD ·DOFFSET1(SB), F0
-	FXCHD F0, F3
-	FADDD 104(SP), F0
-	FSUBD ·DOFFSET0(SB), F0
-	FMOVD ·ALPHA130(SB), F0
-	FADDD F3,F0
-	FSUBD ·ALPHA130(SB), F0
-	FSUBD F0,F3
-	FMULD ·SCALE(SB), F0
-	FMOVD ·ALPHA32(SB), F0
-	FADDD F2,F0
-	FSUBD ·ALPHA32(SB), F0
-	FSUBD F0,F2
-	FMOVD ·ALPHA64(SB), F0
-	FADDD F6,F0
-	FSUBD ·ALPHA64(SB), F0
-	FSUBD F0,F6
-	FMOVD ·ALPHA96(SB), F0
-	FADDD F5,F0
-	FSUBD ·ALPHA96(SB), F0
-	FSUBD F0,F5
-	FXCHD F0, F4
-	FADDDP F0,F3
-	FXCHD F0, F6
-	FADDDP F0,F1
-	FXCHD F0, F3
-	FADDDP F0,F5
-	FXCHD F0, F3
-	FADDDP F0,F1
-	FMOVD 176(SP), F0
-	FMULD F3,F0
-	FMOVD 160(SP), F0
-	FMULD F4,F0
-	FMOVD 144(SP), F0
-	FMULD F5,F0
-	FMOVD 136(SP), F0
-	FMULDP F0,F6
-	FMOVD 160(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F3
-	FMOVD 144(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F5,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULDP F0,F5
-	FXCHD F0, F4
-	FADDDP F0,F5
-	FMOVD 144(SP), F0
-	FMULD F6,F0
-	FADDDP F0,F2
-	FMOVD 136(SP), F0
-	FMULD F6,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULD F6,F0
-	FADDDP F0,F4
-	FMOVD 168(SP), F0
-	FMULDP F0,F6
-	FXCHD F0, F5
-	FADDDP F0,F4
-	FMOVD 136(SP), F0
-	FMULD F2,F0
-	FADDDP F0,F1
-	FMOVD 184(SP), F0
-	FMULD F2,F0
-	FADDDP F0,F5
-	FMOVD 168(SP), F0
-	FMULD F2,F0
-	FADDDP F0,F3
-	FMOVD 152(SP), F0
-	FMULDP F0,F2
-	FXCHD F0, F1
-	FADDDP F0,F3
-	FXCHD F0, F3
-	FXCHD F0, F2
-	NOMOREBYTES:
-	MOVL $0,R10
-	FMOVD ·ALPHA130(SB), F0
-	FADDD F4,F0
-	FSUBD ·ALPHA130(SB), F0
-	FSUBD F0,F4
-	FMULD ·SCALE(SB), F0
-	FMOVD ·ALPHA32(SB), F0
-	FADDD F2,F0
-	FSUBD ·ALPHA32(SB), F0
-	FSUBD F0,F2
-	FMOVD ·ALPHA64(SB), F0
-	FADDD F4,F0
-	FSUBD ·ALPHA64(SB), F0
-	FSUBD F0,F4
-	FMOVD ·ALPHA96(SB), F0
-	FADDD F6,F0
-	FSUBD ·ALPHA96(SB), F0
-	FXCHD F0, F6
-	FSUBD F6,F0
-	FXCHD F0, F4
-	FADDDP F0,F3
-	FXCHD F0, F4
-	FADDDP F0,F1
-	FXCHD F0, F2
-	FADDDP F0,F3
-	FXCHD F0, F4
-	FADDDP F0,F3
-	FXCHD F0, F3
-	FADDD ·HOFFSET0(SB), F0
-	FXCHD F0, F3
-	FADDD ·HOFFSET1(SB), F0
-	FXCHD F0, F1
-	FADDD ·HOFFSET2(SB), F0
-	FXCHD F0, F2
-	FADDD ·HOFFSET3(SB), F0
-	FXCHD F0, F3
-	FMOVDP F0, 104(SP)
-	FMOVDP F0, 112(SP)
-	FMOVDP F0, 120(SP)
-	FMOVDP F0, 128(SP)
-	MOVL 108(SP),DI
-	ANDL $63,DI
-	MOVL 116(SP),SI
-	ANDL $63,SI
-	MOVL 124(SP),DX
-	ANDL $63,DX
-	MOVL 132(SP),CX
-	ANDL $63,CX
-	MOVL 112(SP),R8
-	ADDL DI,R8
-	MOVQ R8,112(SP)
-	MOVL 120(SP),DI
-	ADCL SI,DI
-	MOVQ DI,120(SP)
-	MOVL 128(SP),DI
-	ADCL DX,DI
-	MOVQ DI,128(SP)
-	MOVL R10,DI
-	ADCL CX,DI
-	MOVQ DI,136(SP)
-	MOVQ $5,DI
-	MOVL 104(SP),SI
-	ADDL SI,DI
-	MOVQ DI,104(SP)
-	MOVL R10,DI
-	MOVQ 112(SP),DX
-	ADCL DX,DI
-	MOVQ DI,112(SP)
-	MOVL R10,DI
-	MOVQ 120(SP),CX
-	ADCL CX,DI
-	MOVQ DI,120(SP)
-	MOVL R10,DI
-	MOVQ 128(SP),R8
-	ADCL R8,DI
-	MOVQ DI,128(SP)
-	MOVQ $0XFFFFFFFC,DI
-	MOVQ 136(SP),R9
-	ADCL R9,DI
-	SARL $16,DI
-	MOVQ DI,R9
-	XORL $0XFFFFFFFF,R9
-	ANDQ DI,SI
-	MOVQ 104(SP),AX
-	ANDQ R9,AX
-	ORQ AX,SI
-	ANDQ DI,DX
-	MOVQ 112(SP),AX
-	ANDQ R9,AX
-	ORQ AX,DX
-	ANDQ DI,CX
-	MOVQ 120(SP),AX
-	ANDQ R9,AX
-	ORQ AX,CX
-	ANDQ DI,R8
-	MOVQ 128(SP),DI
-	ANDQ R9,DI
-	ORQ DI,R8
-	MOVQ 88(SP),DI
-	MOVQ 96(SP),R9
-	ADDL 16(R9),SI
-	ADCL 20(R9),DX
-	ADCL 24(R9),CX
-	ADCL 28(R9),R8
-	MOVL SI,0(DI)
-	MOVL DX,4(DI)
-	MOVL CX,8(DI)
-	MOVL R8,12(DI)
-	MOVQ 32(SP),R11
-	MOVQ 40(SP),R12
-	MOVQ 48(SP),R13
-	MOVQ 56(SP),R14
-	MOVQ 64(SP),R15
-	MOVQ 72(SP),BX
-	MOVQ 80(SP),BP
-	MOVQ R11,SP
-	RET
diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305_arm.s b/vendor/golang.org/x/crypto/poly1305/poly1305_arm.s
deleted file mode 100644
index c153867..0000000
--- a/vendor/golang.org/x/crypto/poly1305/poly1305_arm.s
+++ /dev/null
@@ -1,379 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 5a from the public
-// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
-
-// +build arm,!gccgo,!appengine
-
-DATA poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
-DATA poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
-DATA poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
-DATA poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
-DATA poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
-GLOBL poly1305_init_constants_armv6<>(SB), 8, $20
-
-// Warning: the linker may use R11 to synthesize certain instructions. Please
-// take care and verify that no synthetic instructions use it.
-
-TEXT poly1305_init_ext_armv6<>(SB),4,$-4
-  MOVM.DB.W [R4-R11], (R13)
-  MOVM.IA.W (R1), [R2-R5]
-  MOVW $poly1305_init_constants_armv6<>(SB), R7
-  MOVW R2, R8
-  MOVW R2>>26, R9
-  MOVW R3>>20, g
-  MOVW R4>>14, R11
-  MOVW R5>>8, R12
-  ORR R3<<6, R9, R9
-  ORR R4<<12, g, g
-  ORR R5<<18, R11, R11
-  MOVM.IA (R7), [R2-R6]
-  AND R8, R2, R2
-  AND R9, R3, R3
-  AND g, R4, R4
-  AND R11, R5, R5
-  AND R12, R6, R6
-  MOVM.IA.W [R2-R6], (R0)
-  EOR R2, R2, R2
-  EOR R3, R3, R3
-  EOR R4, R4, R4
-  EOR R5, R5, R5
-  EOR R6, R6, R6
-  MOVM.IA.W [R2-R6], (R0)
-  MOVM.IA.W (R1), [R2-R5]
-  MOVM.IA [R2-R6], (R0)
-  MOVM.IA.W (R13), [R4-R11]
-  RET
-
-#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
-  MOVBU (offset+0)(Rsrc), Rtmp; \
-  MOVBU Rtmp, (offset+0)(Rdst); \
-  MOVBU (offset+1)(Rsrc), Rtmp; \
-  MOVBU Rtmp, (offset+1)(Rdst); \
-  MOVBU (offset+2)(Rsrc), Rtmp; \
-  MOVBU Rtmp, (offset+2)(Rdst); \
-  MOVBU (offset+3)(Rsrc), Rtmp; \
-  MOVBU Rtmp, (offset+3)(Rdst)
-
-TEXT poly1305_blocks_armv6<>(SB),4,$-4
-  MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13)
-  SUB $128, R13
-  MOVW R0, 36(R13)
-  MOVW R1, 40(R13)
-  MOVW R2, 44(R13)
-  MOVW R1, R14
-  MOVW R2, R12
-  MOVW 56(R0), R8
-  WORD $0xe1180008 // TST R8, R8 not working see issue 5921
-  EOR R6, R6, R6
-  MOVW.EQ $(1<<24), R6
-  MOVW R6, 32(R13)
-  ADD $64, R13, g
-  MOVM.IA (R0), [R0-R9]
-  MOVM.IA [R0-R4], (g)
-  CMP $16, R12
-  BLO poly1305_blocks_armv6_done
-poly1305_blocks_armv6_mainloop:
-  WORD $0xe31e0003 // TST R14, #3 not working see issue 5921
-  BEQ poly1305_blocks_armv6_mainloop_aligned
-  ADD $48, R13, g
-  MOVW_UNALIGNED(R14, g, R0, 0)
-  MOVW_UNALIGNED(R14, g, R0, 4)
-  MOVW_UNALIGNED(R14, g, R0, 8)
-  MOVW_UNALIGNED(R14, g, R0, 12)
-  MOVM.IA (g), [R0-R3]
-  ADD $16, R14
-  B poly1305_blocks_armv6_mainloop_loaded
-poly1305_blocks_armv6_mainloop_aligned:
-  MOVM.IA.W (R14), [R0-R3]
-poly1305_blocks_armv6_mainloop_loaded:
-  MOVW R0>>26, g
-  MOVW R1>>20, R11
-  MOVW R2>>14, R12
-  MOVW R14, 40(R13)
-  MOVW R3>>8, R4
-  ORR R1<<6, g, g
-  ORR R2<<12, R11, R11
-  ORR R3<<18, R12, R12
-  BIC $0xfc000000, R0, R0
-  BIC $0xfc000000, g, g
-  MOVW 32(R13), R3
-  BIC $0xfc000000, R11, R11
-  BIC $0xfc000000, R12, R12
-  ADD R0, R5, R5
-  ADD g, R6, R6
-  ORR R3, R4, R4
-  ADD R11, R7, R7
-  ADD $64, R13, R14
-  ADD R12, R8, R8
-  ADD R4, R9, R9
-  MOVM.IA (R14), [R0-R4]
-  MULLU R4, R5, (R11, g)
-  MULLU R3, R5, (R14, R12)
-  MULALU R3, R6, (R11, g)
-  MULALU R2, R6, (R14, R12)
-  MULALU R2, R7, (R11, g)
-  MULALU R1, R7, (R14, R12)
-  ADD R4<<2, R4, R4
-  ADD R3<<2, R3, R3
-  MULALU R1, R8, (R11, g)
-  MULALU R0, R8, (R14, R12)
-  MULALU R0, R9, (R11, g)
-  MULALU R4, R9, (R14, R12)
-  MOVW g, 24(R13)
-  MOVW R11, 28(R13)
-  MOVW R12, 16(R13)
-  MOVW R14, 20(R13)
-  MULLU R2, R5, (R11, g)
-  MULLU R1, R5, (R14, R12)
-  MULALU R1, R6, (R11, g)
-  MULALU R0, R6, (R14, R12)
-  MULALU R0, R7, (R11, g)
-  MULALU R4, R7, (R14, R12)
-  ADD R2<<2, R2, R2
-  ADD R1<<2, R1, R1
-  MULALU R4, R8, (R11, g)
-  MULALU R3, R8, (R14, R12)
-  MULALU R3, R9, (R11, g)
-  MULALU R2, R9, (R14, R12)
-  MOVW g, 8(R13)
-  MOVW R11, 12(R13)
-  MOVW R12, 0(R13)
-  MOVW R14, w+4(SP)
-  MULLU R0, R5, (R11, g)
-  MULALU R4, R6, (R11, g)
-  MULALU R3, R7, (R11, g)
-  MULALU R2, R8, (R11, g)
-  MULALU R1, R9, (R11, g)
-  MOVM.IA (R13), [R0-R7]
-  MOVW g>>26, R12
-  MOVW R4>>26, R14
-  ORR R11<<6, R12, R12
-  ORR R5<<6, R14, R14
-  BIC $0xfc000000, g, g
-  BIC $0xfc000000, R4, R4
-  ADD.S R12, R0, R0
-  ADC $0, R1, R1
-  ADD.S R14, R6, R6
-  ADC $0, R7, R7
-  MOVW R0>>26, R12
-  MOVW R6>>26, R14
-  ORR R1<<6, R12, R12
-  ORR R7<<6, R14, R14
-  BIC $0xfc000000, R0, R0
-  BIC $0xfc000000, R6, R6
-  ADD R14<<2, R14, R14
-  ADD.S R12, R2, R2
-  ADC $0, R3, R3
-  ADD R14, g, g
-  MOVW R2>>26, R12
-  MOVW g>>26, R14
-  ORR R3<<6, R12, R12
-  BIC $0xfc000000, g, R5
-  BIC $0xfc000000, R2, R7
-  ADD R12, R4, R4
-  ADD R14, R0, R0
-  MOVW R4>>26, R12
-  BIC $0xfc000000, R4, R8
-  ADD R12, R6, R9
-  MOVW w+44(SP), R12
-  MOVW w+40(SP), R14
-  MOVW R0, R6
-  CMP $32, R12
-  SUB $16, R12, R12
-  MOVW R12, 44(R13)
-  BHS poly1305_blocks_armv6_mainloop
-poly1305_blocks_armv6_done:
-  MOVW 36(R13), R12
-  MOVW R5, 20(R12)
-  MOVW R6, 24(R12)
-  MOVW R7, 28(R12)
-  MOVW R8, 32(R12)
-  MOVW R9, 36(R12)
-  ADD $128, R13, R13
-  MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14]
-  RET
-
-#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
-  MOVBU.P 1(Rsrc), Rtmp; \
-  MOVBU.P Rtmp, 1(Rdst); \
-  MOVBU.P 1(Rsrc), Rtmp; \
-  MOVBU.P Rtmp, 1(Rdst)
-
-#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
-  MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
-  MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
-
-TEXT poly1305_finish_ext_armv6<>(SB),4,$-4
-  MOVM.DB.W [R4, R5, R6, R7, R8, R9, g, R11, R14], (R13)
-  SUB $16, R13, R13
-  MOVW R0, R5
-  MOVW R1, R6
-  MOVW R2, R7
-  MOVW R3, R8
-  AND.S R2, R2, R2
-  BEQ poly1305_finish_ext_armv6_noremaining
-  EOR R0, R0
-  MOVW R13, R9
-  MOVW R0, 0(R13)
-  MOVW R0, 4(R13)
-  MOVW R0, 8(R13)
-  MOVW R0, 12(R13)
-  WORD $0xe3110003 // TST R1, #3 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_aligned
-  WORD $0xe3120008 // TST R2, #8 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip8
-  MOVWP_UNALIGNED(R1, R9, g)
-  MOVWP_UNALIGNED(R1, R9, g)
-poly1305_finish_ext_armv6_skip8:
-  WORD $0xe3120004 // TST $4, R2 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip4
-  MOVWP_UNALIGNED(R1, R9, g)
-poly1305_finish_ext_armv6_skip4:
-  WORD $0xe3120002 // TST $2, R2 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip2
-  MOVHUP_UNALIGNED(R1, R9, g)
-  B poly1305_finish_ext_armv6_skip2
-poly1305_finish_ext_armv6_aligned:
-  WORD $0xe3120008 // TST R2, #8 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip8_aligned
-  MOVM.IA.W (R1), [g-R11]
-  MOVM.IA.W [g-R11], (R9)
-poly1305_finish_ext_armv6_skip8_aligned:
-  WORD $0xe3120004 // TST $4, R2 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip4_aligned
-  MOVW.P 4(R1), g
-  MOVW.P g, 4(R9)
-poly1305_finish_ext_armv6_skip4_aligned:
-  WORD $0xe3120002 // TST $2, R2 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip2
-  MOVHU.P 2(R1), g
-  MOVH.P g, 2(R9)
-poly1305_finish_ext_armv6_skip2:
-  WORD $0xe3120001 // TST $1, R2 not working see issue 5921
-  BEQ poly1305_finish_ext_armv6_skip1
-  MOVBU.P 1(R1), g
-  MOVBU.P g, 1(R9)
-poly1305_finish_ext_armv6_skip1:
-  MOVW $1, R11
-  MOVBU R11, 0(R9)
-  MOVW R11, 56(R5)
-  MOVW R5, R0
-  MOVW R13, R1
-  MOVW $16, R2
-  BL poly1305_blocks_armv6<>(SB)
-poly1305_finish_ext_armv6_noremaining:
-  MOVW 20(R5), R0
-  MOVW 24(R5), R1
-  MOVW 28(R5), R2
-  MOVW 32(R5), R3
-  MOVW 36(R5), R4
-  MOVW R4>>26, R12
-  BIC $0xfc000000, R4, R4
-  ADD R12<<2, R12, R12
-  ADD R12, R0, R0
-  MOVW R0>>26, R12
-  BIC $0xfc000000, R0, R0
-  ADD R12, R1, R1
-  MOVW R1>>26, R12
-  BIC $0xfc000000, R1, R1
-  ADD R12, R2, R2
-  MOVW R2>>26, R12
-  BIC $0xfc000000, R2, R2
-  ADD R12, R3, R3
-  MOVW R3>>26, R12
-  BIC $0xfc000000, R3, R3
-  ADD R12, R4, R4
-  ADD $5, R0, R6
-  MOVW R6>>26, R12
-  BIC $0xfc000000, R6, R6
-  ADD R12, R1, R7
-  MOVW R7>>26, R12
-  BIC $0xfc000000, R7, R7
-  ADD R12, R2, g
-  MOVW g>>26, R12
-  BIC $0xfc000000, g, g
-  ADD R12, R3, R11
-  MOVW $-(1<<26), R12
-  ADD R11>>26, R12, R12
-  BIC $0xfc000000, R11, R11
-  ADD R12, R4, R14
-  MOVW R14>>31, R12
-  SUB $1, R12
-  AND R12, R6, R6
-  AND R12, R7, R7
-  AND R12, g, g
-  AND R12, R11, R11
-  AND R12, R14, R14
-  MVN R12, R12
-  AND R12, R0, R0
-  AND R12, R1, R1
-  AND R12, R2, R2
-  AND R12, R3, R3
-  AND R12, R4, R4
-  ORR R6, R0, R0
-  ORR R7, R1, R1
-  ORR g, R2, R2
-  ORR R11, R3, R3
-  ORR R14, R4, R4
-  ORR R1<<26, R0, R0
-  MOVW R1>>6, R1
-  ORR R2<<20, R1, R1
-  MOVW R2>>12, R2
-  ORR R3<<14, R2, R2
-  MOVW R3>>18, R3
-  ORR R4<<8, R3, R3
-  MOVW 40(R5), R6
-  MOVW 44(R5), R7
-  MOVW 48(R5), g
-  MOVW 52(R5), R11
-  ADD.S R6, R0, R0
-  ADC.S R7, R1, R1
-  ADC.S g, R2, R2
-  ADC.S R11, R3, R3
-  MOVM.IA [R0-R3], (R8)
-  MOVW R5, R12
-  EOR R0, R0, R0
-  EOR R1, R1, R1
-  EOR R2, R2, R2
-  EOR R3, R3, R3
-  EOR R4, R4, R4
-  EOR R5, R5, R5
-  EOR R6, R6, R6
-  EOR R7, R7, R7
-  MOVM.IA.W [R0-R7], (R12)
-  MOVM.IA [R0-R7], (R12)
-  ADD $16, R13, R13
-  MOVM.IA.W (R13), [R4, R5, R6, R7, R8, R9, g, R11, R14]
-  RET
-
-// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]key)
-TEXT ·poly1305_auth_armv6(SB),0,$280-16
-  MOVW  out+0(FP), R4
-  MOVW  m+4(FP), R5
-  MOVW  mlen+8(FP), R6
-  MOVW  key+12(FP), R7
-
-  MOVW R13, R8
-  BIC $63, R13
-  SUB $64, R13, R13
-  MOVW  R13, R0
-  MOVW  R7, R1
-  BL poly1305_init_ext_armv6<>(SB)
-  BIC.S $15, R6, R2
-  BEQ poly1305_auth_armv6_noblocks
-  MOVW R13, R0
-  MOVW R5, R1
-  ADD R2, R5, R5
-  SUB R2, R6, R6
-  BL poly1305_blocks_armv6<>(SB)
-poly1305_auth_armv6_noblocks:
-  MOVW R13, R0
-  MOVW R5, R1
-  MOVW R6, R2
-  MOVW R4, R3
-  BL poly1305_finish_ext_armv6<>(SB)
-  MOVW R8, R13
-  RET
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go
index 6775c70..4dd72fe 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go
+++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go
@@ -6,10 +6,8 @@
 
 package poly1305
 
-// This function is implemented in poly1305_amd64.s
-
+// This function is implemented in sum_amd64.s
 //go:noescape
-
 func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
 
 // Sum generates an authenticator for m using a one-time key and puts the
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s
new file mode 100644
index 0000000..2edae63
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s
@@ -0,0 +1,125 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build amd64,!gccgo,!appengine
+
+#include "textflag.h"
+
+#define POLY1305_ADD(msg, h0, h1, h2) \
+	ADDQ 0(msg), h0;  \
+	ADCQ 8(msg), h1;  \
+	ADCQ $1, h2;      \
+	LEAQ 16(msg), msg
+
+#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \
+	MOVQ  r0, AX;                  \
+	MULQ  h0;                      \
+	MOVQ  AX, t0;                  \
+	MOVQ  DX, t1;                  \
+	MOVQ  r0, AX;                  \
+	MULQ  h1;                      \
+	ADDQ  AX, t1;                  \
+	ADCQ  $0, DX;                  \
+	MOVQ  r0, t2;                  \
+	IMULQ h2, t2;                  \
+	ADDQ  DX, t2;                  \
+	                               \
+	MOVQ  r1, AX;                  \
+	MULQ  h0;                      \
+	ADDQ  AX, t1;                  \
+	ADCQ  $0, DX;                  \
+	MOVQ  DX, h0;                  \
+	MOVQ  r1, t3;                  \
+	IMULQ h2, t3;                  \
+	MOVQ  r1, AX;                  \
+	MULQ  h1;                      \
+	ADDQ  AX, t2;                  \
+	ADCQ  DX, t3;                  \
+	ADDQ  h0, t2;                  \
+	ADCQ  $0, t3;                  \
+	                               \
+	MOVQ  t0, h0;                  \
+	MOVQ  t1, h1;                  \
+	MOVQ  t2, h2;                  \
+	ANDQ  $3, h2;                  \
+	MOVQ  t2, t0;                  \
+	ANDQ  $0xFFFFFFFFFFFFFFFC, t0; \
+	ADDQ  t0, h0;                  \
+	ADCQ  t3, h1;                  \
+	ADCQ  $0, h2;                  \
+	SHRQ  $2, t3, t2;              \
+	SHRQ  $2, t3;                  \
+	ADDQ  t2, h0;                  \
+	ADCQ  t3, h1;                  \
+	ADCQ  $0, h2
+
+DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF
+DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC
+GLOBL ·poly1305Mask<>(SB), RODATA, $16
+
+// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]key)
+TEXT ·poly1305(SB), $0-32
+	MOVQ out+0(FP), DI
+	MOVQ m+8(FP), SI
+	MOVQ mlen+16(FP), R15
+	MOVQ key+24(FP), AX
+
+	MOVQ 0(AX), R11
+	MOVQ 8(AX), R12
+	ANDQ ·poly1305Mask<>(SB), R11   // r0
+	ANDQ ·poly1305Mask<>+8(SB), R12 // r1
+	XORQ R8, R8                    // h0
+	XORQ R9, R9                    // h1
+	XORQ R10, R10                  // h2
+
+	CMPQ R15, $16
+	JB   bytes_between_0_and_15
+
+loop:
+	POLY1305_ADD(SI, R8, R9, R10)
+
+multiply:
+	POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14)
+	SUBQ $16, R15
+	CMPQ R15, $16
+	JAE  loop
+
+bytes_between_0_and_15:
+	TESTQ R15, R15
+	JZ    done
+	MOVQ  $1, BX
+	XORQ  CX, CX
+	XORQ  R13, R13
+	ADDQ  R15, SI
+
+flush_buffer:
+	SHLQ $8, BX, CX
+	SHLQ $8, BX
+	MOVB -1(SI), R13
+	XORQ R13, BX
+	DECQ SI
+	DECQ R15
+	JNZ  flush_buffer
+
+	ADDQ BX, R8
+	ADCQ CX, R9
+	ADCQ $0, R10
+	MOVQ $16, R15
+	JMP  multiply
+
+done:
+	MOVQ    R8, AX
+	MOVQ    R9, BX
+	SUBQ    $0xFFFFFFFFFFFFFFFB, AX
+	SBBQ    $0xFFFFFFFFFFFFFFFF, BX
+	SBBQ    $3, R10
+	CMOVQCS R8, AX
+	CMOVQCS R9, BX
+	MOVQ    key+24(FP), R8
+	ADDQ    16(R8), AX
+	ADCQ    24(R8), BX
+
+	MOVQ AX, 0(DI)
+	MOVQ BX, 8(DI)
+	RET
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/vendor/golang.org/x/crypto/poly1305/sum_arm.go
index 50b979c..5dc321c 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_arm.go
+++ b/vendor/golang.org/x/crypto/poly1305/sum_arm.go
@@ -2,14 +2,12 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build arm,!gccgo,!appengine
+// +build arm,!gccgo,!appengine,!nacl
 
 package poly1305
 
-// This function is implemented in poly1305_arm.s
-
+// This function is implemented in sum_arm.s
 //go:noescape
-
 func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte)
 
 // Sum generates an authenticator for m using a one-time key and puts the
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.s b/vendor/golang.org/x/crypto/poly1305/sum_arm.s
new file mode 100644
index 0000000..f70b4ac
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/sum_arm.s
@@ -0,0 +1,427 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm,!gccgo,!appengine,!nacl
+
+#include "textflag.h"
+
+// This code was translated into a form compatible with 5a from the public
+// domain source by Andrew Moon: github.com/floodyberry/poly1305-opt/blob/master/app/extensions/poly1305.
+
+DATA ·poly1305_init_constants_armv6<>+0x00(SB)/4, $0x3ffffff
+DATA ·poly1305_init_constants_armv6<>+0x04(SB)/4, $0x3ffff03
+DATA ·poly1305_init_constants_armv6<>+0x08(SB)/4, $0x3ffc0ff
+DATA ·poly1305_init_constants_armv6<>+0x0c(SB)/4, $0x3f03fff
+DATA ·poly1305_init_constants_armv6<>+0x10(SB)/4, $0x00fffff
+GLOBL ·poly1305_init_constants_armv6<>(SB), 8, $20
+
+// Warning: the linker may use R11 to synthesize certain instructions. Please
+// take care and verify that no synthetic instructions use it.
+
+TEXT poly1305_init_ext_armv6<>(SB), NOSPLIT, $0
+	// Needs 16 bytes of stack and 64 bytes of space pointed to by R0.  (It
+	// might look like it's only 60 bytes of space but the final four bytes
+	// will be written by another function.) We need to skip over four
+	// bytes of stack because that's saving the value of 'g'.
+	ADD       $4, R13, R8
+	MOVM.IB   [R4-R7], (R8)
+	MOVM.IA.W (R1), [R2-R5]
+	MOVW      $·poly1305_init_constants_armv6<>(SB), R7
+	MOVW      R2, R8
+	MOVW      R2>>26, R9
+	MOVW      R3>>20, g
+	MOVW      R4>>14, R11
+	MOVW      R5>>8, R12
+	ORR       R3<<6, R9, R9
+	ORR       R4<<12, g, g
+	ORR       R5<<18, R11, R11
+	MOVM.IA   (R7), [R2-R6]
+	AND       R8, R2, R2
+	AND       R9, R3, R3
+	AND       g, R4, R4
+	AND       R11, R5, R5
+	AND       R12, R6, R6
+	MOVM.IA.W [R2-R6], (R0)
+	EOR       R2, R2, R2
+	EOR       R3, R3, R3
+	EOR       R4, R4, R4
+	EOR       R5, R5, R5
+	EOR       R6, R6, R6
+	MOVM.IA.W [R2-R6], (R0)
+	MOVM.IA.W (R1), [R2-R5]
+	MOVM.IA   [R2-R6], (R0)
+	ADD       $20, R13, R0
+	MOVM.DA   (R0), [R4-R7]
+	RET
+
+#define MOVW_UNALIGNED(Rsrc, Rdst, Rtmp, offset) \
+	MOVBU (offset+0)(Rsrc), Rtmp; \
+	MOVBU Rtmp, (offset+0)(Rdst); \
+	MOVBU (offset+1)(Rsrc), Rtmp; \
+	MOVBU Rtmp, (offset+1)(Rdst); \
+	MOVBU (offset+2)(Rsrc), Rtmp; \
+	MOVBU Rtmp, (offset+2)(Rdst); \
+	MOVBU (offset+3)(Rsrc), Rtmp; \
+	MOVBU Rtmp, (offset+3)(Rdst)
+
+TEXT poly1305_blocks_armv6<>(SB), NOSPLIT, $0
+	// Needs 24 bytes of stack for saved registers and then 88 bytes of
+	// scratch space after that. We assume that 24 bytes at (R13) have
+	// already been used: four bytes for the link register saved in the
+	// prelude of poly1305_auth_armv6, four bytes for saving the value of g
+	// in that function and 16 bytes of scratch space used around
+	// poly1305_finish_ext_armv6_skip1.
+	ADD     $24, R13, R12
+	MOVM.IB [R4-R8, R14], (R12)
+	MOVW    R0, 88(R13)
+	MOVW    R1, 92(R13)
+	MOVW    R2, 96(R13)
+	MOVW    R1, R14
+	MOVW    R2, R12
+	MOVW    56(R0), R8
+	WORD    $0xe1180008                // TST R8, R8 not working see issue 5921
+	EOR     R6, R6, R6
+	MOVW.EQ $(1<<24), R6
+	MOVW    R6, 84(R13)
+	ADD     $116, R13, g
+	MOVM.IA (R0), [R0-R9]
+	MOVM.IA [R0-R4], (g)
+	CMP     $16, R12
+	BLO     poly1305_blocks_armv6_done
+
+poly1305_blocks_armv6_mainloop:
+	WORD    $0xe31e0003                            // TST R14, #3 not working see issue 5921
+	BEQ     poly1305_blocks_armv6_mainloop_aligned
+	ADD     $100, R13, g
+	MOVW_UNALIGNED(R14, g, R0, 0)
+	MOVW_UNALIGNED(R14, g, R0, 4)
+	MOVW_UNALIGNED(R14, g, R0, 8)
+	MOVW_UNALIGNED(R14, g, R0, 12)
+	MOVM.IA (g), [R0-R3]
+	ADD     $16, R14
+	B       poly1305_blocks_armv6_mainloop_loaded
+
+poly1305_blocks_armv6_mainloop_aligned:
+	MOVM.IA.W (R14), [R0-R3]
+
+poly1305_blocks_armv6_mainloop_loaded:
+	MOVW    R0>>26, g
+	MOVW    R1>>20, R11
+	MOVW    R2>>14, R12
+	MOVW    R14, 92(R13)
+	MOVW    R3>>8, R4
+	ORR     R1<<6, g, g
+	ORR     R2<<12, R11, R11
+	ORR     R3<<18, R12, R12
+	BIC     $0xfc000000, R0, R0
+	BIC     $0xfc000000, g, g
+	MOVW    84(R13), R3
+	BIC     $0xfc000000, R11, R11
+	BIC     $0xfc000000, R12, R12
+	ADD     R0, R5, R5
+	ADD     g, R6, R6
+	ORR     R3, R4, R4
+	ADD     R11, R7, R7
+	ADD     $116, R13, R14
+	ADD     R12, R8, R8
+	ADD     R4, R9, R9
+	MOVM.IA (R14), [R0-R4]
+	MULLU   R4, R5, (R11, g)
+	MULLU   R3, R5, (R14, R12)
+	MULALU  R3, R6, (R11, g)
+	MULALU  R2, R6, (R14, R12)
+	MULALU  R2, R7, (R11, g)
+	MULALU  R1, R7, (R14, R12)
+	ADD     R4<<2, R4, R4
+	ADD     R3<<2, R3, R3
+	MULALU  R1, R8, (R11, g)
+	MULALU  R0, R8, (R14, R12)
+	MULALU  R0, R9, (R11, g)
+	MULALU  R4, R9, (R14, R12)
+	MOVW    g, 76(R13)
+	MOVW    R11, 80(R13)
+	MOVW    R12, 68(R13)
+	MOVW    R14, 72(R13)
+	MULLU   R2, R5, (R11, g)
+	MULLU   R1, R5, (R14, R12)
+	MULALU  R1, R6, (R11, g)
+	MULALU  R0, R6, (R14, R12)
+	MULALU  R0, R7, (R11, g)
+	MULALU  R4, R7, (R14, R12)
+	ADD     R2<<2, R2, R2
+	ADD     R1<<2, R1, R1
+	MULALU  R4, R8, (R11, g)
+	MULALU  R3, R8, (R14, R12)
+	MULALU  R3, R9, (R11, g)
+	MULALU  R2, R9, (R14, R12)
+	MOVW    g, 60(R13)
+	MOVW    R11, 64(R13)
+	MOVW    R12, 52(R13)
+	MOVW    R14, 56(R13)
+	MULLU   R0, R5, (R11, g)
+	MULALU  R4, R6, (R11, g)
+	MULALU  R3, R7, (R11, g)
+	MULALU  R2, R8, (R11, g)
+	MULALU  R1, R9, (R11, g)
+	ADD     $52, R13, R0
+	MOVM.IA (R0), [R0-R7]
+	MOVW    g>>26, R12
+	MOVW    R4>>26, R14
+	ORR     R11<<6, R12, R12
+	ORR     R5<<6, R14, R14
+	BIC     $0xfc000000, g, g
+	BIC     $0xfc000000, R4, R4
+	ADD.S   R12, R0, R0
+	ADC     $0, R1, R1
+	ADD.S   R14, R6, R6
+	ADC     $0, R7, R7
+	MOVW    R0>>26, R12
+	MOVW    R6>>26, R14
+	ORR     R1<<6, R12, R12
+	ORR     R7<<6, R14, R14
+	BIC     $0xfc000000, R0, R0
+	BIC     $0xfc000000, R6, R6
+	ADD     R14<<2, R14, R14
+	ADD.S   R12, R2, R2
+	ADC     $0, R3, R3
+	ADD     R14, g, g
+	MOVW    R2>>26, R12
+	MOVW    g>>26, R14
+	ORR     R3<<6, R12, R12
+	BIC     $0xfc000000, g, R5
+	BIC     $0xfc000000, R2, R7
+	ADD     R12, R4, R4
+	ADD     R14, R0, R0
+	MOVW    R4>>26, R12
+	BIC     $0xfc000000, R4, R8
+	ADD     R12, R6, R9
+	MOVW    96(R13), R12
+	MOVW    92(R13), R14
+	MOVW    R0, R6
+	CMP     $32, R12
+	SUB     $16, R12, R12
+	MOVW    R12, 96(R13)
+	BHS     poly1305_blocks_armv6_mainloop
+
+poly1305_blocks_armv6_done:
+	MOVW    88(R13), R12
+	MOVW    R5, 20(R12)
+	MOVW    R6, 24(R12)
+	MOVW    R7, 28(R12)
+	MOVW    R8, 32(R12)
+	MOVW    R9, 36(R12)
+	ADD     $48, R13, R0
+	MOVM.DA (R0), [R4-R8, R14]
+	RET
+
+#define MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp) \
+	MOVBU.P 1(Rsrc), Rtmp; \
+	MOVBU.P Rtmp, 1(Rdst); \
+	MOVBU.P 1(Rsrc), Rtmp; \
+	MOVBU.P Rtmp, 1(Rdst)
+
+#define MOVWP_UNALIGNED(Rsrc, Rdst, Rtmp) \
+	MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp); \
+	MOVHUP_UNALIGNED(Rsrc, Rdst, Rtmp)
+
+// func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]key)
+TEXT ·poly1305_auth_armv6(SB), $196-16
+	// The value 196, just above, is the sum of 64 (the size of the context
+	// structure) and 132 (the amount of stack needed).
+	//
+	// At this point, the stack pointer (R13) has been moved down. It
+	// points to the saved link register and there's 196 bytes of free
+	// space above it.
+	//
+	// The stack for this function looks like:
+	//
+	// +---------------------
+	// |
+	// | 64 bytes of context structure
+	// |
+	// +---------------------
+	// |
+	// | 112 bytes for poly1305_blocks_armv6
+	// |
+	// +---------------------
+	// | 16 bytes of final block, constructed at
+	// | poly1305_finish_ext_armv6_skip8
+	// +---------------------
+	// | four bytes of saved 'g'
+	// +---------------------
+	// | lr, saved by prelude    <- R13 points here
+	// +---------------------
+	MOVW g, 4(R13)
+
+	MOVW out+0(FP), R4
+	MOVW m+4(FP), R5
+	MOVW mlen+8(FP), R6
+	MOVW key+12(FP), R7
+
+	ADD  $136, R13, R0 // 136 = 4 + 4 + 16 + 112
+	MOVW R7, R1
+
+	// poly1305_init_ext_armv6 will write to the stack from R13+4, but
+	// that's ok because none of the other values have been written yet.
+	BL    poly1305_init_ext_armv6<>(SB)
+	BIC.S $15, R6, R2
+	BEQ   poly1305_auth_armv6_noblocks
+	ADD   $136, R13, R0
+	MOVW  R5, R1
+	ADD   R2, R5, R5
+	SUB   R2, R6, R6
+	BL    poly1305_blocks_armv6<>(SB)
+
+poly1305_auth_armv6_noblocks:
+	ADD  $136, R13, R0
+	MOVW R5, R1
+	MOVW R6, R2
+	MOVW R4, R3
+
+	MOVW  R0, R5
+	MOVW  R1, R6
+	MOVW  R2, R7
+	MOVW  R3, R8
+	AND.S R2, R2, R2
+	BEQ   poly1305_finish_ext_armv6_noremaining
+	EOR   R0, R0
+	ADD   $8, R13, R9                           // 8 = offset to 16 byte scratch space
+	MOVW  R0, (R9)
+	MOVW  R0, 4(R9)
+	MOVW  R0, 8(R9)
+	MOVW  R0, 12(R9)
+	WORD  $0xe3110003                           // TST R1, #3 not working see issue 5921
+	BEQ   poly1305_finish_ext_armv6_aligned
+	WORD  $0xe3120008                           // TST R2, #8 not working see issue 5921
+	BEQ   poly1305_finish_ext_armv6_skip8
+	MOVWP_UNALIGNED(R1, R9, g)
+	MOVWP_UNALIGNED(R1, R9, g)
+
+poly1305_finish_ext_armv6_skip8:
+	WORD $0xe3120004                     // TST $4, R2 not working see issue 5921
+	BEQ  poly1305_finish_ext_armv6_skip4
+	MOVWP_UNALIGNED(R1, R9, g)
+
+poly1305_finish_ext_armv6_skip4:
+	WORD $0xe3120002                     // TST $2, R2 not working see issue 5921
+	BEQ  poly1305_finish_ext_armv6_skip2
+	MOVHUP_UNALIGNED(R1, R9, g)
+	B    poly1305_finish_ext_armv6_skip2
+
+poly1305_finish_ext_armv6_aligned:
+	WORD      $0xe3120008                             // TST R2, #8 not working see issue 5921
+	BEQ       poly1305_finish_ext_armv6_skip8_aligned
+	MOVM.IA.W (R1), [g-R11]
+	MOVM.IA.W [g-R11], (R9)
+
+poly1305_finish_ext_armv6_skip8_aligned:
+	WORD   $0xe3120004                             // TST $4, R2 not working see issue 5921
+	BEQ    poly1305_finish_ext_armv6_skip4_aligned
+	MOVW.P 4(R1), g
+	MOVW.P g, 4(R9)
+
+poly1305_finish_ext_armv6_skip4_aligned:
+	WORD    $0xe3120002                     // TST $2, R2 not working see issue 5921
+	BEQ     poly1305_finish_ext_armv6_skip2
+	MOVHU.P 2(R1), g
+	MOVH.P  g, 2(R9)
+
+poly1305_finish_ext_armv6_skip2:
+	WORD    $0xe3120001                     // TST $1, R2 not working see issue 5921
+	BEQ     poly1305_finish_ext_armv6_skip1
+	MOVBU.P 1(R1), g
+	MOVBU.P g, 1(R9)
+
+poly1305_finish_ext_armv6_skip1:
+	MOVW  $1, R11
+	MOVBU R11, 0(R9)
+	MOVW  R11, 56(R5)
+	MOVW  R5, R0
+	ADD   $8, R13, R1
+	MOVW  $16, R2
+	BL    poly1305_blocks_armv6<>(SB)
+
+poly1305_finish_ext_armv6_noremaining:
+	MOVW      20(R5), R0
+	MOVW      24(R5), R1
+	MOVW      28(R5), R2
+	MOVW      32(R5), R3
+	MOVW      36(R5), R4
+	MOVW      R4>>26, R12
+	BIC       $0xfc000000, R4, R4
+	ADD       R12<<2, R12, R12
+	ADD       R12, R0, R0
+	MOVW      R0>>26, R12
+	BIC       $0xfc000000, R0, R0
+	ADD       R12, R1, R1
+	MOVW      R1>>26, R12
+	BIC       $0xfc000000, R1, R1
+	ADD       R12, R2, R2
+	MOVW      R2>>26, R12
+	BIC       $0xfc000000, R2, R2
+	ADD       R12, R3, R3
+	MOVW      R3>>26, R12
+	BIC       $0xfc000000, R3, R3
+	ADD       R12, R4, R4
+	ADD       $5, R0, R6
+	MOVW      R6>>26, R12
+	BIC       $0xfc000000, R6, R6
+	ADD       R12, R1, R7
+	MOVW      R7>>26, R12
+	BIC       $0xfc000000, R7, R7
+	ADD       R12, R2, g
+	MOVW      g>>26, R12
+	BIC       $0xfc000000, g, g
+	ADD       R12, R3, R11
+	MOVW      $-(1<<26), R12
+	ADD       R11>>26, R12, R12
+	BIC       $0xfc000000, R11, R11
+	ADD       R12, R4, R9
+	MOVW      R9>>31, R12
+	SUB       $1, R12
+	AND       R12, R6, R6
+	AND       R12, R7, R7
+	AND       R12, g, g
+	AND       R12, R11, R11
+	AND       R12, R9, R9
+	MVN       R12, R12
+	AND       R12, R0, R0
+	AND       R12, R1, R1
+	AND       R12, R2, R2
+	AND       R12, R3, R3
+	AND       R12, R4, R4
+	ORR       R6, R0, R0
+	ORR       R7, R1, R1
+	ORR       g, R2, R2
+	ORR       R11, R3, R3
+	ORR       R9, R4, R4
+	ORR       R1<<26, R0, R0
+	MOVW      R1>>6, R1
+	ORR       R2<<20, R1, R1
+	MOVW      R2>>12, R2
+	ORR       R3<<14, R2, R2
+	MOVW      R3>>18, R3
+	ORR       R4<<8, R3, R3
+	MOVW      40(R5), R6
+	MOVW      44(R5), R7
+	MOVW      48(R5), g
+	MOVW      52(R5), R11
+	ADD.S     R6, R0, R0
+	ADC.S     R7, R1, R1
+	ADC.S     g, R2, R2
+	ADC.S     R11, R3, R3
+	MOVM.IA   [R0-R3], (R8)
+	MOVW      R5, R12
+	EOR       R0, R0, R0
+	EOR       R1, R1, R1
+	EOR       R2, R2, R2
+	EOR       R3, R3, R3
+	EOR       R4, R4, R4
+	EOR       R5, R5, R5
+	EOR       R6, R6, R6
+	EOR       R7, R7, R7
+	MOVM.IA.W [R0-R7], (R12)
+	MOVM.IA   [R0-R7], (R12)
+	MOVW      4(R13), g
+	RET
diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ref.go b/vendor/golang.org/x/crypto/poly1305/sum_ref.go
index 0b24fc7..b2805a5 100644
--- a/vendor/golang.org/x/crypto/poly1305/sum_ref.go
+++ b/vendor/golang.org/x/crypto/poly1305/sum_ref.go
@@ -2,1530 +2,140 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build !amd64,!arm gccgo appengine
+// +build !amd64,!arm gccgo appengine nacl
 
 package poly1305
 
-// Based on original, public domain implementation from NaCl by D. J.
-// Bernstein.
+import "encoding/binary"
 
-import "math"
-
-const (
-	alpham80 = 0.00000000558793544769287109375
-	alpham48 = 24.0
-	alpham16 = 103079215104.0
-	alpha0   = 6755399441055744.0
-	alpha18  = 1770887431076116955136.0
-	alpha32  = 29014219670751100192948224.0
-	alpha50  = 7605903601369376408980219232256.0
-	alpha64  = 124615124604835863084731911901282304.0
-	alpha82  = 32667107224410092492483962313449748299776.0
-	alpha96  = 535217884764734955396857238543560676143529984.0
-	alpha112 = 35076039295941670036888435985190792471742381031424.0
-	alpha130 = 9194973245195333150150082162901855101712434733101613056.0
-	scale    = 0.0000000000000000000000000000000000000036734198463196484624023016788195177431833298649127735047148490821200539357960224151611328125
-	offset0  = 6755408030990331.0
-	offset1  = 29014256564239239022116864.0
-	offset2  = 124615283061160854719918951570079744.0
-	offset3  = 535219245894202480694386063513315216128475136.0
-)
-
-// Sum generates an authenticator for m using a one-time key and puts the
+// Sum generates an authenticator for msg using a one-time key and puts the
 // 16-byte result into out. Authenticating two different messages with the same
 // key allows an attacker to forge messages at will.
-func Sum(out *[16]byte, m []byte, key *[32]byte) {
-	r := key
-	s := key[16:]
+func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
 	var (
-		y7        float64
-		y6        float64
-		y1        float64
-		y0        float64
-		y5        float64
-		y4        float64
-		x7        float64
-		x6        float64
-		x1        float64
-		x0        float64
-		y3        float64
-		y2        float64
-		x5        float64
-		r3lowx0   float64
-		x4        float64
-		r0lowx6   float64
-		x3        float64
-		r3highx0  float64
-		x2        float64
-		r0highx6  float64
-		r0lowx0   float64
-		sr1lowx6  float64
-		r0highx0  float64
-		sr1highx6 float64
-		sr3low    float64
-		r1lowx0   float64
-		sr2lowx6  float64
-		r1highx0  float64
-		sr2highx6 float64
-		r2lowx0   float64
-		sr3lowx6  float64
-		r2highx0  float64
-		sr3highx6 float64
-		r1highx4  float64
-		r1lowx4   float64
-		r0highx4  float64
-		r0lowx4   float64
-		sr3highx4 float64
-		sr3lowx4  float64
-		sr2highx4 float64
-		sr2lowx4  float64
-		r0lowx2   float64
-		r0highx2  float64
-		r1lowx2   float64
-		r1highx2  float64
-		r2lowx2   float64
-		r2highx2  float64
-		sr3lowx2  float64
-		sr3highx2 float64
-		z0        float64
-		z1        float64
-		z2        float64
-		z3        float64
-		m0        int64
-		m1        int64
-		m2        int64
-		m3        int64
-		m00       uint32
-		m01       uint32
-		m02       uint32
-		m03       uint32
-		m10       uint32
-		m11       uint32
-		m12       uint32
-		m13       uint32
-		m20       uint32
-		m21       uint32
-		m22       uint32
-		m23       uint32
-		m30       uint32
-		m31       uint32
-		m32       uint32
-		m33       uint64
-		lbelow2   int32
-		lbelow3   int32
-		lbelow4   int32
-		lbelow5   int32
-		lbelow6   int32
-		lbelow7   int32
-		lbelow8   int32
-		lbelow9   int32
-		lbelow10  int32
-		lbelow11  int32
-		lbelow12  int32
-		lbelow13  int32
-		lbelow14  int32
-		lbelow15  int32
-		s00       uint32
-		s01       uint32
-		s02       uint32
-		s03       uint32
-		s10       uint32
-		s11       uint32
-		s12       uint32
-		s13       uint32
-		s20       uint32
-		s21       uint32
-		s22       uint32
-		s23       uint32
-		s30       uint32
-		s31       uint32
-		s32       uint32
-		s33       uint32
-		bits32    uint64
-		f         uint64
-		f0        uint64
-		f1        uint64
-		f2        uint64
-		f3        uint64
-		f4        uint64
-		g         uint64
-		g0        uint64
-		g1        uint64
-		g2        uint64
-		g3        uint64
-		g4        uint64
+		h0, h1, h2, h3, h4 uint32 // the hash accumulators
+		r0, r1, r2, r3, r4 uint64 // the r part of the key
 	)
 
-	var p int32
+	r0 = uint64(binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff)
+	r1 = uint64((binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03)
+	r2 = uint64((binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff)
+	r3 = uint64((binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff)
+	r4 = uint64((binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff)
 
-	l := int32(len(m))
+	R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5
 
-	r00 := uint32(r[0])
+	for len(msg) >= TagSize {
+		// h += msg
+		h0 += binary.LittleEndian.Uint32(msg[0:]) & 0x3ffffff
+		h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff
+		h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff
+		h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff
+		h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | (1 << 24)
 
-	r01 := uint32(r[1])
+		// h *= r
+		d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1)
+		d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2)
+		d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3)
+		d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4)
+		d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0)
 
-	r02 := uint32(r[2])
-	r0 := int64(2151)
+		// h %= p
+		h0 = uint32(d0) & 0x3ffffff
+		h1 = uint32(d1) & 0x3ffffff
+		h2 = uint32(d2) & 0x3ffffff
+		h3 = uint32(d3) & 0x3ffffff
+		h4 = uint32(d4) & 0x3ffffff
 
-	r03 := uint32(r[3])
-	r03 &= 15
-	r0 <<= 51
+		h0 += uint32(d4>>26) * 5
+		h1 += h0 >> 26
+		h0 = h0 & 0x3ffffff
 
-	r10 := uint32(r[4])
-	r10 &= 252
-	r01 <<= 8
-	r0 += int64(r00)
-
-	r11 := uint32(r[5])
-	r02 <<= 16
-	r0 += int64(r01)
-
-	r12 := uint32(r[6])
-	r03 <<= 24
-	r0 += int64(r02)
-
-	r13 := uint32(r[7])
-	r13 &= 15
-	r1 := int64(2215)
-	r0 += int64(r03)
-
-	d0 := r0
-	r1 <<= 51
-	r2 := int64(2279)
-
-	r20 := uint32(r[8])
-	r20 &= 252
-	r11 <<= 8
-	r1 += int64(r10)
-
-	r21 := uint32(r[9])
-	r12 <<= 16
-	r1 += int64(r11)
-
-	r22 := uint32(r[10])
-	r13 <<= 24
-	r1 += int64(r12)
-
-	r23 := uint32(r[11])
-	r23 &= 15
-	r2 <<= 51
-	r1 += int64(r13)
-
-	d1 := r1
-	r21 <<= 8
-	r2 += int64(r20)
-
-	r30 := uint32(r[12])
-	r30 &= 252
-	r22 <<= 16
-	r2 += int64(r21)
-
-	r31 := uint32(r[13])
-	r23 <<= 24
-	r2 += int64(r22)
-
-	r32 := uint32(r[14])
-	r2 += int64(r23)
-	r3 := int64(2343)
-
-	d2 := r2
-	r3 <<= 51
-
-	r33 := uint32(r[15])
-	r33 &= 15
-	r31 <<= 8
-	r3 += int64(r30)
-
-	r32 <<= 16
-	r3 += int64(r31)
-
-	r33 <<= 24
-	r3 += int64(r32)
-
-	r3 += int64(r33)
-	h0 := alpha32 - alpha32
-
-	d3 := r3
-	h1 := alpha32 - alpha32
-
-	h2 := alpha32 - alpha32
-
-	h3 := alpha32 - alpha32
-
-	h4 := alpha32 - alpha32
-
-	r0low := math.Float64frombits(uint64(d0))
-	h5 := alpha32 - alpha32
-
-	r1low := math.Float64frombits(uint64(d1))
-	h6 := alpha32 - alpha32
-
-	r2low := math.Float64frombits(uint64(d2))
-	h7 := alpha32 - alpha32
-
-	r0low -= alpha0
-
-	r1low -= alpha32
-
-	r2low -= alpha64
-
-	r0high := r0low + alpha18
-
-	r3low := math.Float64frombits(uint64(d3))
-
-	r1high := r1low + alpha50
-	sr1low := scale * r1low
-
-	r2high := r2low + alpha82
-	sr2low := scale * r2low
-
-	r0high -= alpha18
-	r0high_stack := r0high
-
-	r3low -= alpha96
-
-	r1high -= alpha50
-	r1high_stack := r1high
-
-	sr1high := sr1low + alpham80
-
-	r0low -= r0high
-
-	r2high -= alpha82
-	sr3low = scale * r3low
-
-	sr2high := sr2low + alpham48
-
-	r1low -= r1high
-	r1low_stack := r1low
-
-	sr1high -= alpham80
-	sr1high_stack := sr1high
-
-	r2low -= r2high
-	r2low_stack := r2low
-
-	sr2high -= alpham48
-	sr2high_stack := sr2high
-
-	r3high := r3low + alpha112
-	r0low_stack := r0low
-
-	sr1low -= sr1high
-	sr1low_stack := sr1low
-
-	sr3high := sr3low + alpham16
-	r2high_stack := r2high
-
-	sr2low -= sr2high
-	sr2low_stack := sr2low
-
-	r3high -= alpha112
-	r3high_stack := r3high
-
-	sr3high -= alpham16
-	sr3high_stack := sr3high
-
-	r3low -= r3high
-	r3low_stack := r3low
-
-	sr3low -= sr3high
-	sr3low_stack := sr3low
-
-	if l < 16 {
-		goto addatmost15bytes
+		msg = msg[TagSize:]
 	}
 
-	m00 = uint32(m[p+0])
-	m0 = 2151
+	if len(msg) > 0 {
+		var block [TagSize]byte
+		off := copy(block[:], msg)
+		block[off] = 0x01
 
-	m0 <<= 51
-	m1 = 2215
-	m01 = uint32(m[p+1])
+		// h += msg
+		h0 += binary.LittleEndian.Uint32(block[0:]) & 0x3ffffff
+		h1 += (binary.LittleEndian.Uint32(block[3:]) >> 2) & 0x3ffffff
+		h2 += (binary.LittleEndian.Uint32(block[6:]) >> 4) & 0x3ffffff
+		h3 += (binary.LittleEndian.Uint32(block[9:]) >> 6) & 0x3ffffff
+		h4 += (binary.LittleEndian.Uint32(block[12:]) >> 8)
 
-	m1 <<= 51
-	m2 = 2279
-	m02 = uint32(m[p+2])
+		// h *= r
+		d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1)
+		d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2)
+		d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3)
+		d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4)
+		d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0)
 
-	m2 <<= 51
-	m3 = 2343
-	m03 = uint32(m[p+3])
+		// h %= p
+		h0 = uint32(d0) & 0x3ffffff
+		h1 = uint32(d1) & 0x3ffffff
+		h2 = uint32(d2) & 0x3ffffff
+		h3 = uint32(d3) & 0x3ffffff
+		h4 = uint32(d4) & 0x3ffffff
 
-	m10 = uint32(m[p+4])
-	m01 <<= 8
-	m0 += int64(m00)
-
-	m11 = uint32(m[p+5])
-	m02 <<= 16
-	m0 += int64(m01)
-
-	m12 = uint32(m[p+6])
-	m03 <<= 24
-	m0 += int64(m02)
-
-	m13 = uint32(m[p+7])
-	m3 <<= 51
-	m0 += int64(m03)
-
-	m20 = uint32(m[p+8])
-	m11 <<= 8
-	m1 += int64(m10)
-
-	m21 = uint32(m[p+9])
-	m12 <<= 16
-	m1 += int64(m11)
-
-	m22 = uint32(m[p+10])
-	m13 <<= 24
-	m1 += int64(m12)
-
-	m23 = uint32(m[p+11])
-	m1 += int64(m13)
-
-	m30 = uint32(m[p+12])
-	m21 <<= 8
-	m2 += int64(m20)
-
-	m31 = uint32(m[p+13])
-	m22 <<= 16
-	m2 += int64(m21)
-
-	m32 = uint32(m[p+14])
-	m23 <<= 24
-	m2 += int64(m22)
-
-	m33 = uint64(m[p+15])
-	m2 += int64(m23)
-
-	d0 = m0
-	m31 <<= 8
-	m3 += int64(m30)
-
-	d1 = m1
-	m32 <<= 16
-	m3 += int64(m31)
-
-	d2 = m2
-	m33 += 256
-
-	m33 <<= 24
-	m3 += int64(m32)
-
-	m3 += int64(m33)
-	d3 = m3
-
-	p += 16
-	l -= 16
-
-	z0 = math.Float64frombits(uint64(d0))
-
-	z1 = math.Float64frombits(uint64(d1))
-
-	z2 = math.Float64frombits(uint64(d2))
-
-	z3 = math.Float64frombits(uint64(d3))
-
-	z0 -= alpha0
-
-	z1 -= alpha32
-
-	z2 -= alpha64
-
-	z3 -= alpha96
-
-	h0 += z0
-
-	h1 += z1
-
-	h3 += z2
-
-	h5 += z3
-
-	if l < 16 {
-		goto multiplyaddatmost15bytes
+		h0 += uint32(d4>>26) * 5
+		h1 += h0 >> 26
+		h0 = h0 & 0x3ffffff
 	}
 
-multiplyaddatleast16bytes:
+	// h %= p reduction
+	h2 += h1 >> 26
+	h1 &= 0x3ffffff
+	h3 += h2 >> 26
+	h2 &= 0x3ffffff
+	h4 += h3 >> 26
+	h3 &= 0x3ffffff
+	h0 += 5 * (h4 >> 26)
+	h4 &= 0x3ffffff
+	h1 += h0 >> 26
+	h0 &= 0x3ffffff
 
-	m2 = 2279
-	m20 = uint32(m[p+8])
-	y7 = h7 + alpha130
+	// h - p
+	t0 := h0 + 5
+	t1 := h1 + (t0 >> 26)
+	t2 := h2 + (t1 >> 26)
+	t3 := h3 + (t2 >> 26)
+	t4 := h4 + (t3 >> 26) - (1 << 26)
+	t0 &= 0x3ffffff
+	t1 &= 0x3ffffff
+	t2 &= 0x3ffffff
+	t3 &= 0x3ffffff
 
-	m2 <<= 51
-	m3 = 2343
-	m21 = uint32(m[p+9])
-	y6 = h6 + alpha130
+	// select h if h < p else h - p
+	t_mask := (t4 >> 31) - 1
+	h_mask := ^t_mask
+	h0 = (h0 & h_mask) | (t0 & t_mask)
+	h1 = (h1 & h_mask) | (t1 & t_mask)
+	h2 = (h2 & h_mask) | (t2 & t_mask)
+	h3 = (h3 & h_mask) | (t3 & t_mask)
+	h4 = (h4 & h_mask) | (t4 & t_mask)
 
-	m3 <<= 51
-	m0 = 2151
-	m22 = uint32(m[p+10])
-	y1 = h1 + alpha32
+	// h %= 2^128
+	h0 |= h1 << 26
+	h1 = ((h1 >> 6) | (h2 << 20))
+	h2 = ((h2 >> 12) | (h3 << 14))
+	h3 = ((h3 >> 18) | (h4 << 8))
 
-	m0 <<= 51
-	m1 = 2215
-	m23 = uint32(m[p+11])
-	y0 = h0 + alpha32
+	// s: the s part of the key
+	// tag = (h + s) % (2^128)
+	t := uint64(h0) + uint64(binary.LittleEndian.Uint32(key[16:]))
+	h0 = uint32(t)
+	t = uint64(h1) + uint64(binary.LittleEndian.Uint32(key[20:])) + (t >> 32)
+	h1 = uint32(t)
+	t = uint64(h2) + uint64(binary.LittleEndian.Uint32(key[24:])) + (t >> 32)
+	h2 = uint32(t)
+	t = uint64(h3) + uint64(binary.LittleEndian.Uint32(key[28:])) + (t >> 32)
+	h3 = uint32(t)
 
-	m1 <<= 51
-	m30 = uint32(m[p+12])
-	y7 -= alpha130
-
-	m21 <<= 8
-	m2 += int64(m20)
-	m31 = uint32(m[p+13])
-	y6 -= alpha130
-
-	m22 <<= 16
-	m2 += int64(m21)
-	m32 = uint32(m[p+14])
-	y1 -= alpha32
-
-	m23 <<= 24
-	m2 += int64(m22)
-	m33 = uint64(m[p+15])
-	y0 -= alpha32
-
-	m2 += int64(m23)
-	m00 = uint32(m[p+0])
-	y5 = h5 + alpha96
-
-	m31 <<= 8
-	m3 += int64(m30)
-	m01 = uint32(m[p+1])
-	y4 = h4 + alpha96
-
-	m32 <<= 16
-	m02 = uint32(m[p+2])
-	x7 = h7 - y7
-	y7 *= scale
-
-	m33 += 256
-	m03 = uint32(m[p+3])
-	x6 = h6 - y6
-	y6 *= scale
-
-	m33 <<= 24
-	m3 += int64(m31)
-	m10 = uint32(m[p+4])
-	x1 = h1 - y1
-
-	m01 <<= 8
-	m3 += int64(m32)
-	m11 = uint32(m[p+5])
-	x0 = h0 - y0
-
-	m3 += int64(m33)
-	m0 += int64(m00)
-	m12 = uint32(m[p+6])
-	y5 -= alpha96
-
-	m02 <<= 16
-	m0 += int64(m01)
-	m13 = uint32(m[p+7])
-	y4 -= alpha96
-
-	m03 <<= 24
-	m0 += int64(m02)
-	d2 = m2
-	x1 += y7
-
-	m0 += int64(m03)
-	d3 = m3
-	x0 += y6
-
-	m11 <<= 8
-	m1 += int64(m10)
-	d0 = m0
-	x7 += y5
-
-	m12 <<= 16
-	m1 += int64(m11)
-	x6 += y4
-
-	m13 <<= 24
-	m1 += int64(m12)
-	y3 = h3 + alpha64
-
-	m1 += int64(m13)
-	d1 = m1
-	y2 = h2 + alpha64
-
-	x0 += x1
-
-	x6 += x7
-
-	y3 -= alpha64
-	r3low = r3low_stack
-
-	y2 -= alpha64
-	r0low = r0low_stack
-
-	x5 = h5 - y5
-	r3lowx0 = r3low * x0
-	r3high = r3high_stack
-
-	x4 = h4 - y4
-	r0lowx6 = r0low * x6
-	r0high = r0high_stack
-
-	x3 = h3 - y3
-	r3highx0 = r3high * x0
-	sr1low = sr1low_stack
-
-	x2 = h2 - y2
-	r0highx6 = r0high * x6
-	sr1high = sr1high_stack
-
-	x5 += y3
-	r0lowx0 = r0low * x0
-	r1low = r1low_stack
-
-	h6 = r3lowx0 + r0lowx6
-	sr1lowx6 = sr1low * x6
-	r1high = r1high_stack
-
-	x4 += y2
-	r0highx0 = r0high * x0
-	sr2low = sr2low_stack
-
-	h7 = r3highx0 + r0highx6
-	sr1highx6 = sr1high * x6
-	sr2high = sr2high_stack
-
-	x3 += y1
-	r1lowx0 = r1low * x0
-	r2low = r2low_stack
-
-	h0 = r0lowx0 + sr1lowx6
-	sr2lowx6 = sr2low * x6
-	r2high = r2high_stack
-
-	x2 += y0
-	r1highx0 = r1high * x0
-	sr3low = sr3low_stack
-
-	h1 = r0highx0 + sr1highx6
-	sr2highx6 = sr2high * x6
-	sr3high = sr3high_stack
-
-	x4 += x5
-	r2lowx0 = r2low * x0
-	z2 = math.Float64frombits(uint64(d2))
-
-	h2 = r1lowx0 + sr2lowx6
-	sr3lowx6 = sr3low * x6
-
-	x2 += x3
-	r2highx0 = r2high * x0
-	z3 = math.Float64frombits(uint64(d3))
-
-	h3 = r1highx0 + sr2highx6
-	sr3highx6 = sr3high * x6
-
-	r1highx4 = r1high * x4
-	z2 -= alpha64
-
-	h4 = r2lowx0 + sr3lowx6
-	r1lowx4 = r1low * x4
-
-	r0highx4 = r0high * x4
-	z3 -= alpha96
-
-	h5 = r2highx0 + sr3highx6
-	r0lowx4 = r0low * x4
-
-	h7 += r1highx4
-	sr3highx4 = sr3high * x4
-
-	h6 += r1lowx4
-	sr3lowx4 = sr3low * x4
-
-	h5 += r0highx4
-	sr2highx4 = sr2high * x4
-
-	h4 += r0lowx4
-	sr2lowx4 = sr2low * x4
-
-	h3 += sr3highx4
-	r0lowx2 = r0low * x2
-
-	h2 += sr3lowx4
-	r0highx2 = r0high * x2
-
-	h1 += sr2highx4
-	r1lowx2 = r1low * x2
-
-	h0 += sr2lowx4
-	r1highx2 = r1high * x2
-
-	h2 += r0lowx2
-	r2lowx2 = r2low * x2
-
-	h3 += r0highx2
-	r2highx2 = r2high * x2
-
-	h4 += r1lowx2
-	sr3lowx2 = sr3low * x2
-
-	h5 += r1highx2
-	sr3highx2 = sr3high * x2
-
-	p += 16
-	l -= 16
-	h6 += r2lowx2
-
-	h7 += r2highx2
-
-	z1 = math.Float64frombits(uint64(d1))
-	h0 += sr3lowx2
-
-	z0 = math.Float64frombits(uint64(d0))
-	h1 += sr3highx2
-
-	z1 -= alpha32
-
-	z0 -= alpha0
-
-	h5 += z3
-
-	h3 += z2
-
-	h1 += z1
-
-	h0 += z0
-
-	if l >= 16 {
-		goto multiplyaddatleast16bytes
-	}
-
-multiplyaddatmost15bytes:
-
-	y7 = h7 + alpha130
-
-	y6 = h6 + alpha130
-
-	y1 = h1 + alpha32
-
-	y0 = h0 + alpha32
-
-	y7 -= alpha130
-
-	y6 -= alpha130
-
-	y1 -= alpha32
-
-	y0 -= alpha32
-
-	y5 = h5 + alpha96
-
-	y4 = h4 + alpha96
-
-	x7 = h7 - y7
-	y7 *= scale
-
-	x6 = h6 - y6
-	y6 *= scale
-
-	x1 = h1 - y1
-
-	x0 = h0 - y0
-
-	y5 -= alpha96
-
-	y4 -= alpha96
-
-	x1 += y7
-
-	x0 += y6
-
-	x7 += y5
-
-	x6 += y4
-
-	y3 = h3 + alpha64
-
-	y2 = h2 + alpha64
-
-	x0 += x1
-
-	x6 += x7
-
-	y3 -= alpha64
-	r3low = r3low_stack
-
-	y2 -= alpha64
-	r0low = r0low_stack
-
-	x5 = h5 - y5
-	r3lowx0 = r3low * x0
-	r3high = r3high_stack
-
-	x4 = h4 - y4
-	r0lowx6 = r0low * x6
-	r0high = r0high_stack
-
-	x3 = h3 - y3
-	r3highx0 = r3high * x0
-	sr1low = sr1low_stack
-
-	x2 = h2 - y2
-	r0highx6 = r0high * x6
-	sr1high = sr1high_stack
-
-	x5 += y3
-	r0lowx0 = r0low * x0
-	r1low = r1low_stack
-
-	h6 = r3lowx0 + r0lowx6
-	sr1lowx6 = sr1low * x6
-	r1high = r1high_stack
-
-	x4 += y2
-	r0highx0 = r0high * x0
-	sr2low = sr2low_stack
-
-	h7 = r3highx0 + r0highx6
-	sr1highx6 = sr1high * x6
-	sr2high = sr2high_stack
-
-	x3 += y1
-	r1lowx0 = r1low * x0
-	r2low = r2low_stack
-
-	h0 = r0lowx0 + sr1lowx6
-	sr2lowx6 = sr2low * x6
-	r2high = r2high_stack
-
-	x2 += y0
-	r1highx0 = r1high * x0
-	sr3low = sr3low_stack
-
-	h1 = r0highx0 + sr1highx6
-	sr2highx6 = sr2high * x6
-	sr3high = sr3high_stack
-
-	x4 += x5
-	r2lowx0 = r2low * x0
-
-	h2 = r1lowx0 + sr2lowx6
-	sr3lowx6 = sr3low * x6
-
-	x2 += x3
-	r2highx0 = r2high * x0
-
-	h3 = r1highx0 + sr2highx6
-	sr3highx6 = sr3high * x6
-
-	r1highx4 = r1high * x4
-
-	h4 = r2lowx0 + sr3lowx6
-	r1lowx4 = r1low * x4
-
-	r0highx4 = r0high * x4
-
-	h5 = r2highx0 + sr3highx6
-	r0lowx4 = r0low * x4
-
-	h7 += r1highx4
-	sr3highx4 = sr3high * x4
-
-	h6 += r1lowx4
-	sr3lowx4 = sr3low * x4
-
-	h5 += r0highx4
-	sr2highx4 = sr2high * x4
-
-	h4 += r0lowx4
-	sr2lowx4 = sr2low * x4
-
-	h3 += sr3highx4
-	r0lowx2 = r0low * x2
-
-	h2 += sr3lowx4
-	r0highx2 = r0high * x2
-
-	h1 += sr2highx4
-	r1lowx2 = r1low * x2
-
-	h0 += sr2lowx4
-	r1highx2 = r1high * x2
-
-	h2 += r0lowx2
-	r2lowx2 = r2low * x2
-
-	h3 += r0highx2
-	r2highx2 = r2high * x2
-
-	h4 += r1lowx2
-	sr3lowx2 = sr3low * x2
-
-	h5 += r1highx2
-	sr3highx2 = sr3high * x2
-
-	h6 += r2lowx2
-
-	h7 += r2highx2
-
-	h0 += sr3lowx2
-
-	h1 += sr3highx2
-
-addatmost15bytes:
-
-	if l == 0 {
-		goto nomorebytes
-	}
-
-	lbelow2 = l - 2
-
-	lbelow3 = l - 3
-
-	lbelow2 >>= 31
-	lbelow4 = l - 4
-
-	m00 = uint32(m[p+0])
-	lbelow3 >>= 31
-	p += lbelow2
-
-	m01 = uint32(m[p+1])
-	lbelow4 >>= 31
-	p += lbelow3
-
-	m02 = uint32(m[p+2])
-	p += lbelow4
-	m0 = 2151
-
-	m03 = uint32(m[p+3])
-	m0 <<= 51
-	m1 = 2215
-
-	m0 += int64(m00)
-	m01 &^= uint32(lbelow2)
-
-	m02 &^= uint32(lbelow3)
-	m01 -= uint32(lbelow2)
-
-	m01 <<= 8
-	m03 &^= uint32(lbelow4)
-
-	m0 += int64(m01)
-	lbelow2 -= lbelow3
-
-	m02 += uint32(lbelow2)
-	lbelow3 -= lbelow4
-
-	m02 <<= 16
-	m03 += uint32(lbelow3)
-
-	m03 <<= 24
-	m0 += int64(m02)
-
-	m0 += int64(m03)
-	lbelow5 = l - 5
-
-	lbelow6 = l - 6
-	lbelow7 = l - 7
-
-	lbelow5 >>= 31
-	lbelow8 = l - 8
-
-	lbelow6 >>= 31
-	p += lbelow5
-
-	m10 = uint32(m[p+4])
-	lbelow7 >>= 31
-	p += lbelow6
-
-	m11 = uint32(m[p+5])
-	lbelow8 >>= 31
-	p += lbelow7
-
-	m12 = uint32(m[p+6])
-	m1 <<= 51
-	p += lbelow8
-
-	m13 = uint32(m[p+7])
-	m10 &^= uint32(lbelow5)
-	lbelow4 -= lbelow5
-
-	m10 += uint32(lbelow4)
-	lbelow5 -= lbelow6
-
-	m11 &^= uint32(lbelow6)
-	m11 += uint32(lbelow5)
-
-	m11 <<= 8
-	m1 += int64(m10)
-
-	m1 += int64(m11)
-	m12 &^= uint32(lbelow7)
-
-	lbelow6 -= lbelow7
-	m13 &^= uint32(lbelow8)
-
-	m12 += uint32(lbelow6)
-	lbelow7 -= lbelow8
-
-	m12 <<= 16
-	m13 += uint32(lbelow7)
-
-	m13 <<= 24
-	m1 += int64(m12)
-
-	m1 += int64(m13)
-	m2 = 2279
-
-	lbelow9 = l - 9
-	m3 = 2343
-
-	lbelow10 = l - 10
-	lbelow11 = l - 11
-
-	lbelow9 >>= 31
-	lbelow12 = l - 12
-
-	lbelow10 >>= 31
-	p += lbelow9
-
-	m20 = uint32(m[p+8])
-	lbelow11 >>= 31
-	p += lbelow10
-
-	m21 = uint32(m[p+9])
-	lbelow12 >>= 31
-	p += lbelow11
-
-	m22 = uint32(m[p+10])
-	m2 <<= 51
-	p += lbelow12
-
-	m23 = uint32(m[p+11])
-	m20 &^= uint32(lbelow9)
-	lbelow8 -= lbelow9
-
-	m20 += uint32(lbelow8)
-	lbelow9 -= lbelow10
-
-	m21 &^= uint32(lbelow10)
-	m21 += uint32(lbelow9)
-
-	m21 <<= 8
-	m2 += int64(m20)
-
-	m2 += int64(m21)
-	m22 &^= uint32(lbelow11)
-
-	lbelow10 -= lbelow11
-	m23 &^= uint32(lbelow12)
-
-	m22 += uint32(lbelow10)
-	lbelow11 -= lbelow12
-
-	m22 <<= 16
-	m23 += uint32(lbelow11)
-
-	m23 <<= 24
-	m2 += int64(m22)
-
-	m3 <<= 51
-	lbelow13 = l - 13
-
-	lbelow13 >>= 31
-	lbelow14 = l - 14
-
-	lbelow14 >>= 31
-	p += lbelow13
-	lbelow15 = l - 15
-
-	m30 = uint32(m[p+12])
-	lbelow15 >>= 31
-	p += lbelow14
-
-	m31 = uint32(m[p+13])
-	p += lbelow15
-	m2 += int64(m23)
-
-	m32 = uint32(m[p+14])
-	m30 &^= uint32(lbelow13)
-	lbelow12 -= lbelow13
-
-	m30 += uint32(lbelow12)
-	lbelow13 -= lbelow14
-
-	m3 += int64(m30)
-	m31 &^= uint32(lbelow14)
-
-	m31 += uint32(lbelow13)
-	m32 &^= uint32(lbelow15)
-
-	m31 <<= 8
-	lbelow14 -= lbelow15
-
-	m3 += int64(m31)
-	m32 += uint32(lbelow14)
-	d0 = m0
-
-	m32 <<= 16
-	m33 = uint64(lbelow15 + 1)
-	d1 = m1
-
-	m33 <<= 24
-	m3 += int64(m32)
-	d2 = m2
-
-	m3 += int64(m33)
-	d3 = m3
-
-	z3 = math.Float64frombits(uint64(d3))
-
-	z2 = math.Float64frombits(uint64(d2))
-
-	z1 = math.Float64frombits(uint64(d1))
-
-	z0 = math.Float64frombits(uint64(d0))
-
-	z3 -= alpha96
-
-	z2 -= alpha64
-
-	z1 -= alpha32
-
-	z0 -= alpha0
-
-	h5 += z3
-
-	h3 += z2
-
-	h1 += z1
-
-	h0 += z0
-
-	y7 = h7 + alpha130
-
-	y6 = h6 + alpha130
-
-	y1 = h1 + alpha32
-
-	y0 = h0 + alpha32
-
-	y7 -= alpha130
-
-	y6 -= alpha130
-
-	y1 -= alpha32
-
-	y0 -= alpha32
-
-	y5 = h5 + alpha96
-
-	y4 = h4 + alpha96
-
-	x7 = h7 - y7
-	y7 *= scale
-
-	x6 = h6 - y6
-	y6 *= scale
-
-	x1 = h1 - y1
-
-	x0 = h0 - y0
-
-	y5 -= alpha96
-
-	y4 -= alpha96
-
-	x1 += y7
-
-	x0 += y6
-
-	x7 += y5
-
-	x6 += y4
-
-	y3 = h3 + alpha64
-
-	y2 = h2 + alpha64
-
-	x0 += x1
-
-	x6 += x7
-
-	y3 -= alpha64
-	r3low = r3low_stack
-
-	y2 -= alpha64
-	r0low = r0low_stack
-
-	x5 = h5 - y5
-	r3lowx0 = r3low * x0
-	r3high = r3high_stack
-
-	x4 = h4 - y4
-	r0lowx6 = r0low * x6
-	r0high = r0high_stack
-
-	x3 = h3 - y3
-	r3highx0 = r3high * x0
-	sr1low = sr1low_stack
-
-	x2 = h2 - y2
-	r0highx6 = r0high * x6
-	sr1high = sr1high_stack
-
-	x5 += y3
-	r0lowx0 = r0low * x0
-	r1low = r1low_stack
-
-	h6 = r3lowx0 + r0lowx6
-	sr1lowx6 = sr1low * x6
-	r1high = r1high_stack
-
-	x4 += y2
-	r0highx0 = r0high * x0
-	sr2low = sr2low_stack
-
-	h7 = r3highx0 + r0highx6
-	sr1highx6 = sr1high * x6
-	sr2high = sr2high_stack
-
-	x3 += y1
-	r1lowx0 = r1low * x0
-	r2low = r2low_stack
-
-	h0 = r0lowx0 + sr1lowx6
-	sr2lowx6 = sr2low * x6
-	r2high = r2high_stack
-
-	x2 += y0
-	r1highx0 = r1high * x0
-	sr3low = sr3low_stack
-
-	h1 = r0highx0 + sr1highx6
-	sr2highx6 = sr2high * x6
-	sr3high = sr3high_stack
-
-	x4 += x5
-	r2lowx0 = r2low * x0
-
-	h2 = r1lowx0 + sr2lowx6
-	sr3lowx6 = sr3low * x6
-
-	x2 += x3
-	r2highx0 = r2high * x0
-
-	h3 = r1highx0 + sr2highx6
-	sr3highx6 = sr3high * x6
-
-	r1highx4 = r1high * x4
-
-	h4 = r2lowx0 + sr3lowx6
-	r1lowx4 = r1low * x4
-
-	r0highx4 = r0high * x4
-
-	h5 = r2highx0 + sr3highx6
-	r0lowx4 = r0low * x4
-
-	h7 += r1highx4
-	sr3highx4 = sr3high * x4
-
-	h6 += r1lowx4
-	sr3lowx4 = sr3low * x4
-
-	h5 += r0highx4
-	sr2highx4 = sr2high * x4
-
-	h4 += r0lowx4
-	sr2lowx4 = sr2low * x4
-
-	h3 += sr3highx4
-	r0lowx2 = r0low * x2
-
-	h2 += sr3lowx4
-	r0highx2 = r0high * x2
-
-	h1 += sr2highx4
-	r1lowx2 = r1low * x2
-
-	h0 += sr2lowx4
-	r1highx2 = r1high * x2
-
-	h2 += r0lowx2
-	r2lowx2 = r2low * x2
-
-	h3 += r0highx2
-	r2highx2 = r2high * x2
-
-	h4 += r1lowx2
-	sr3lowx2 = sr3low * x2
-
-	h5 += r1highx2
-	sr3highx2 = sr3high * x2
-
-	h6 += r2lowx2
-
-	h7 += r2highx2
-
-	h0 += sr3lowx2
-
-	h1 += sr3highx2
-
-nomorebytes:
-
-	y7 = h7 + alpha130
-
-	y0 = h0 + alpha32
-
-	y1 = h1 + alpha32
-
-	y2 = h2 + alpha64
-
-	y7 -= alpha130
-
-	y3 = h3 + alpha64
-
-	y4 = h4 + alpha96
-
-	y5 = h5 + alpha96
-
-	x7 = h7 - y7
-	y7 *= scale
-
-	y0 -= alpha32
-
-	y1 -= alpha32
-
-	y2 -= alpha64
-
-	h6 += x7
-
-	y3 -= alpha64
-
-	y4 -= alpha96
-
-	y5 -= alpha96
-
-	y6 = h6 + alpha130
-
-	x0 = h0 - y0
-
-	x1 = h1 - y1
-
-	x2 = h2 - y2
-
-	y6 -= alpha130
-
-	x0 += y7
-
-	x3 = h3 - y3
-
-	x4 = h4 - y4
-
-	x5 = h5 - y5
-
-	x6 = h6 - y6
-
-	y6 *= scale
-
-	x2 += y0
-
-	x3 += y1
-
-	x4 += y2
-
-	x0 += y6
-
-	x5 += y3
-
-	x6 += y4
-
-	x2 += x3
-
-	x0 += x1
-
-	x4 += x5
-
-	x6 += y5
-
-	x2 += offset1
-	d1 = int64(math.Float64bits(x2))
-
-	x0 += offset0
-	d0 = int64(math.Float64bits(x0))
-
-	x4 += offset2
-	d2 = int64(math.Float64bits(x4))
-
-	x6 += offset3
-	d3 = int64(math.Float64bits(x6))
-
-	f0 = uint64(d0)
-
-	f1 = uint64(d1)
-	bits32 = math.MaxUint64
-
-	f2 = uint64(d2)
-	bits32 >>= 32
-
-	f3 = uint64(d3)
-	f = f0 >> 32
-
-	f0 &= bits32
-	f &= 255
-
-	f1 += f
-	g0 = f0 + 5
-
-	g = g0 >> 32
-	g0 &= bits32
-
-	f = f1 >> 32
-	f1 &= bits32
-
-	f &= 255
-	g1 = f1 + g
-
-	g = g1 >> 32
-	f2 += f
-
-	f = f2 >> 32
-	g1 &= bits32
-
-	f2 &= bits32
-	f &= 255
-
-	f3 += f
-	g2 = f2 + g
-
-	g = g2 >> 32
-	g2 &= bits32
-
-	f4 = f3 >> 32
-	f3 &= bits32
-
-	f4 &= 255
-	g3 = f3 + g
-
-	g = g3 >> 32
-	g3 &= bits32
-
-	g4 = f4 + g
-
-	g4 = g4 - 4
-	s00 = uint32(s[0])
-
-	f = uint64(int64(g4) >> 63)
-	s01 = uint32(s[1])
-
-	f0 &= f
-	g0 &^= f
-	s02 = uint32(s[2])
-
-	f1 &= f
-	f0 |= g0
-	s03 = uint32(s[3])
-
-	g1 &^= f
-	f2 &= f
-	s10 = uint32(s[4])
-
-	f3 &= f
-	g2 &^= f
-	s11 = uint32(s[5])
-
-	g3 &^= f
-	f1 |= g1
-	s12 = uint32(s[6])
-
-	f2 |= g2
-	f3 |= g3
-	s13 = uint32(s[7])
-
-	s01 <<= 8
-	f0 += uint64(s00)
-	s20 = uint32(s[8])
-
-	s02 <<= 16
-	f0 += uint64(s01)
-	s21 = uint32(s[9])
-
-	s03 <<= 24
-	f0 += uint64(s02)
-	s22 = uint32(s[10])
-
-	s11 <<= 8
-	f1 += uint64(s10)
-	s23 = uint32(s[11])
-
-	s12 <<= 16
-	f1 += uint64(s11)
-	s30 = uint32(s[12])
-
-	s13 <<= 24
-	f1 += uint64(s12)
-	s31 = uint32(s[13])
-
-	f0 += uint64(s03)
-	f1 += uint64(s13)
-	s32 = uint32(s[14])
-
-	s21 <<= 8
-	f2 += uint64(s20)
-	s33 = uint32(s[15])
-
-	s22 <<= 16
-	f2 += uint64(s21)
-
-	s23 <<= 24
-	f2 += uint64(s22)
-
-	s31 <<= 8
-	f3 += uint64(s30)
-
-	s32 <<= 16
-	f3 += uint64(s31)
-
-	s33 <<= 24
-	f3 += uint64(s32)
-
-	f2 += uint64(s23)
-	f3 += uint64(s33)
-
-	out[0] = byte(f0)
-	f0 >>= 8
-	out[1] = byte(f0)
-	f0 >>= 8
-	out[2] = byte(f0)
-	f0 >>= 8
-	out[3] = byte(f0)
-	f0 >>= 8
-	f1 += f0
-
-	out[4] = byte(f1)
-	f1 >>= 8
-	out[5] = byte(f1)
-	f1 >>= 8
-	out[6] = byte(f1)
-	f1 >>= 8
-	out[7] = byte(f1)
-	f1 >>= 8
-	f2 += f1
-
-	out[8] = byte(f2)
-	f2 >>= 8
-	out[9] = byte(f2)
-	f2 >>= 8
-	out[10] = byte(f2)
-	f2 >>= 8
-	out[11] = byte(f2)
-	f2 >>= 8
-	f3 += f2
-
-	out[12] = byte(f3)
-	f3 >>= 8
-	out[13] = byte(f3)
-	f3 >>= 8
-	out[14] = byte(f3)
-	f3 >>= 8
-	out[15] = byte(f3)
+	binary.LittleEndian.PutUint32(out[0:], h0)
+	binary.LittleEndian.PutUint32(out[4:], h1)
+	binary.LittleEndian.PutUint32(out[8:], h2)
+	binary.LittleEndian.PutUint32(out[12:], h3)
 }
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s
index 6e1df96..22afbdc 100644
--- a/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s
+++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s
@@ -5,29 +5,23 @@
 // +build amd64,!appengine,!gccgo
 
 // This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
+// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html
 
 // func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte)
-TEXT ·salsa2020XORKeyStream(SB),0,$512-40
+// This needs up to 64 bytes at 360(SP); hence the non-obvious frame size.
+TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment
 	MOVQ out+0(FP),DI
 	MOVQ in+8(FP),SI
 	MOVQ n+16(FP),DX
 	MOVQ nonce+24(FP),CX
 	MOVQ key+32(FP),R8
 
-	MOVQ SP,R11
-	MOVQ $31,R9
-	NOTQ R9
-	ANDQ R9,SP
-	ADDQ $32,SP
+	MOVQ SP,R12
+	MOVQ SP,R9
+	ADDQ $31, R9
+	ANDQ $~31, R9
+	MOVQ R9, SP
 
-	MOVQ R11,352(SP)
-	MOVQ R12,360(SP)
-	MOVQ R13,368(SP)
-	MOVQ R14,376(SP)
-	MOVQ R15,384(SP)
-	MOVQ BX,392(SP)
-	MOVQ BP,400(SP)
 	MOVQ DX,R9
 	MOVQ CX,DX
 	MOVQ R8,R10
@@ -133,7 +127,7 @@
 	SHRQ $32,CX
 	MOVL DX,16(SP)
 	MOVL CX, 36 (SP)
-	MOVQ R9,408(SP)
+	MOVQ R9,352(SP)
 	MOVQ $20,DX
 	MOVOA 64(SP),X0
 	MOVOA 80(SP),X1
@@ -650,7 +644,7 @@
 	MOVL CX,244(DI)
 	MOVL R8,248(DI)
 	MOVL R9,252(DI)
-	MOVQ 408(SP),R9
+	MOVQ 352(SP),R9
 	SUBQ $256,R9
 	ADDQ $256,SI
 	ADDQ $256,DI
@@ -662,13 +656,13 @@
 	CMPQ R9,$64
 	JAE NOCOPY
 	MOVQ DI,DX
-	LEAQ 416(SP),DI
+	LEAQ 360(SP),DI
 	MOVQ R9,CX
 	REP; MOVSB
-	LEAQ 416(SP),DI
-	LEAQ 416(SP),SI
+	LEAQ 360(SP),DI
+	LEAQ 360(SP),SI
 	NOCOPY:
-	MOVQ R9,408(SP)
+	MOVQ R9,352(SP)
 	MOVOA 48(SP),X0
 	MOVOA 0(SP),X1
 	MOVOA 16(SP),X2
@@ -867,7 +861,7 @@
 	MOVL R8,44(DI)
 	MOVL R9,28(DI)
 	MOVL AX,12(DI)
-	MOVQ 408(SP),R9
+	MOVQ 352(SP),R9
 	MOVL 16(SP),CX
 	MOVL  36 (SP),R8
 	ADDQ $1,CX
@@ -886,14 +880,7 @@
 	REP; MOVSB
 	BYTESATLEAST64:
 	DONE:
-	MOVQ 352(SP),R11
-	MOVQ 360(SP),R12
-	MOVQ 368(SP),R13
-	MOVQ 376(SP),R14
-	MOVQ 384(SP),R15
-	MOVQ 392(SP),BX
-	MOVQ 400(SP),BP
-	MOVQ R11,SP
+	MOVQ R12,SP
 	RET
 	BYTESATLEAST65:
 	SUBQ $64,R9
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
new file mode 100644
index 0000000..18379a9
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go
@@ -0,0 +1,951 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package terminal
+
+import (
+	"bytes"
+	"io"
+	"sync"
+	"unicode/utf8"
+)
+
+// EscapeCodes contains escape sequences that can be written to the terminal in
+// order to achieve different styles of text.
+type EscapeCodes struct {
+	// Foreground colors
+	Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte
+
+	// Reset all attributes
+	Reset []byte
+}
+
+var vt100EscapeCodes = EscapeCodes{
+	Black:   []byte{keyEscape, '[', '3', '0', 'm'},
+	Red:     []byte{keyEscape, '[', '3', '1', 'm'},
+	Green:   []byte{keyEscape, '[', '3', '2', 'm'},
+	Yellow:  []byte{keyEscape, '[', '3', '3', 'm'},
+	Blue:    []byte{keyEscape, '[', '3', '4', 'm'},
+	Magenta: []byte{keyEscape, '[', '3', '5', 'm'},
+	Cyan:    []byte{keyEscape, '[', '3', '6', 'm'},
+	White:   []byte{keyEscape, '[', '3', '7', 'm'},
+
+	Reset: []byte{keyEscape, '[', '0', 'm'},
+}
+
+// Terminal contains the state for running a VT100 terminal that is capable of
+// reading lines of input.
+type Terminal struct {
+	// AutoCompleteCallback, if non-null, is called for each keypress with
+	// the full input line and the current position of the cursor (in
+	// bytes, as an index into |line|). If it returns ok=false, the key
+	// press is processed normally. Otherwise it returns a replacement line
+	// and the new cursor position.
+	AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)
+
+	// Escape contains a pointer to the escape codes for this terminal.
+	// It's always a valid pointer, although the escape codes themselves
+	// may be empty if the terminal doesn't support them.
+	Escape *EscapeCodes
+
+	// lock protects the terminal and the state in this object from
+	// concurrent processing of a key press and a Write() call.
+	lock sync.Mutex
+
+	c      io.ReadWriter
+	prompt []rune
+
+	// line is the current line being entered.
+	line []rune
+	// pos is the logical position of the cursor in line
+	pos int
+	// echo is true if local echo is enabled
+	echo bool
+	// pasteActive is true iff there is a bracketed paste operation in
+	// progress.
+	pasteActive bool
+
+	// cursorX contains the current X value of the cursor where the left
+	// edge is 0. cursorY contains the row number where the first row of
+	// the current line is 0.
+	cursorX, cursorY int
+	// maxLine is the greatest value of cursorY so far.
+	maxLine int
+
+	termWidth, termHeight int
+
+	// outBuf contains the terminal data to be sent.
+	outBuf []byte
+	// remainder contains the remainder of any partial key sequences after
+	// a read. It aliases into inBuf.
+	remainder []byte
+	inBuf     [256]byte
+
+	// history contains previously entered commands so that they can be
+	// accessed with the up and down keys.
+	history stRingBuffer
+	// historyIndex stores the currently accessed history entry, where zero
+	// means the immediately previous entry.
+	historyIndex int
+	// When navigating up and down the history it's possible to return to
+	// the incomplete, initial line. That value is stored in
+	// historyPending.
+	historyPending string
+}
+
+// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is
+// a local terminal, that terminal must first have been put into raw mode.
+// prompt is a string that is written at the start of each input line (i.e.
+// "> ").
+func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
+	return &Terminal{
+		Escape:       &vt100EscapeCodes,
+		c:            c,
+		prompt:       []rune(prompt),
+		termWidth:    80,
+		termHeight:   24,
+		echo:         true,
+		historyIndex: -1,
+	}
+}
+
+const (
+	keyCtrlD     = 4
+	keyCtrlU     = 21
+	keyEnter     = '\r'
+	keyEscape    = 27
+	keyBackspace = 127
+	keyUnknown   = 0xd800 /* UTF-16 surrogate area */ + iota
+	keyUp
+	keyDown
+	keyLeft
+	keyRight
+	keyAltLeft
+	keyAltRight
+	keyHome
+	keyEnd
+	keyDeleteWord
+	keyDeleteLine
+	keyClearScreen
+	keyPasteStart
+	keyPasteEnd
+)
+
+var (
+	crlf       = []byte{'\r', '\n'}
+	pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'}
+	pasteEnd   = []byte{keyEscape, '[', '2', '0', '1', '~'}
+)
+
+// bytesToKey tries to parse a key sequence from b. If successful, it returns
+// the key and the remainder of the input. Otherwise it returns utf8.RuneError.
+func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
+	if len(b) == 0 {
+		return utf8.RuneError, nil
+	}
+
+	if !pasteActive {
+		switch b[0] {
+		case 1: // ^A
+			return keyHome, b[1:]
+		case 5: // ^E
+			return keyEnd, b[1:]
+		case 8: // ^H
+			return keyBackspace, b[1:]
+		case 11: // ^K
+			return keyDeleteLine, b[1:]
+		case 12: // ^L
+			return keyClearScreen, b[1:]
+		case 23: // ^W
+			return keyDeleteWord, b[1:]
+		}
+	}
+
+	if b[0] != keyEscape {
+		if !utf8.FullRune(b) {
+			return utf8.RuneError, b
+		}
+		r, l := utf8.DecodeRune(b)
+		return r, b[l:]
+	}
+
+	if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' {
+		switch b[2] {
+		case 'A':
+			return keyUp, b[3:]
+		case 'B':
+			return keyDown, b[3:]
+		case 'C':
+			return keyRight, b[3:]
+		case 'D':
+			return keyLeft, b[3:]
+		case 'H':
+			return keyHome, b[3:]
+		case 'F':
+			return keyEnd, b[3:]
+		}
+	}
+
+	if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' {
+		switch b[5] {
+		case 'C':
+			return keyAltRight, b[6:]
+		case 'D':
+			return keyAltLeft, b[6:]
+		}
+	}
+
+	if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) {
+		return keyPasteStart, b[6:]
+	}
+
+	if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) {
+		return keyPasteEnd, b[6:]
+	}
+
+	// If we get here then we have a key that we don't recognise, or a
+	// partial sequence. It's not clear how one should find the end of a
+	// sequence without knowing them all, but it seems that [a-zA-Z~] only
+	// appears at the end of a sequence.
+	for i, c := range b[0:] {
+		if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' {
+			return keyUnknown, b[i+1:]
+		}
+	}
+
+	return utf8.RuneError, b
+}
+
+// queue appends data to the end of t.outBuf
+func (t *Terminal) queue(data []rune) {
+	t.outBuf = append(t.outBuf, []byte(string(data))...)
+}
+
+var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'}
+var space = []rune{' '}
+
+func isPrintable(key rune) bool {
+	isInSurrogateArea := key >= 0xd800 && key <= 0xdbff
+	return key >= 32 && !isInSurrogateArea
+}
+
+// moveCursorToPos appends data to t.outBuf which will move the cursor to the
+// given, logical position in the text.
+func (t *Terminal) moveCursorToPos(pos int) {
+	if !t.echo {
+		return
+	}
+
+	x := visualLength(t.prompt) + pos
+	y := x / t.termWidth
+	x = x % t.termWidth
+
+	up := 0
+	if y < t.cursorY {
+		up = t.cursorY - y
+	}
+
+	down := 0
+	if y > t.cursorY {
+		down = y - t.cursorY
+	}
+
+	left := 0
+	if x < t.cursorX {
+		left = t.cursorX - x
+	}
+
+	right := 0
+	if x > t.cursorX {
+		right = x - t.cursorX
+	}
+
+	t.cursorX = x
+	t.cursorY = y
+	t.move(up, down, left, right)
+}
+
+func (t *Terminal) move(up, down, left, right int) {
+	movement := make([]rune, 3*(up+down+left+right))
+	m := movement
+	for i := 0; i < up; i++ {
+		m[0] = keyEscape
+		m[1] = '['
+		m[2] = 'A'
+		m = m[3:]
+	}
+	for i := 0; i < down; i++ {
+		m[0] = keyEscape
+		m[1] = '['
+		m[2] = 'B'
+		m = m[3:]
+	}
+	for i := 0; i < left; i++ {
+		m[0] = keyEscape
+		m[1] = '['
+		m[2] = 'D'
+		m = m[3:]
+	}
+	for i := 0; i < right; i++ {
+		m[0] = keyEscape
+		m[1] = '['
+		m[2] = 'C'
+		m = m[3:]
+	}
+
+	t.queue(movement)
+}
+
+func (t *Terminal) clearLineToRight() {
+	op := []rune{keyEscape, '[', 'K'}
+	t.queue(op)
+}
+
+const maxLineLength = 4096
+
+func (t *Terminal) setLine(newLine []rune, newPos int) {
+	if t.echo {
+		t.moveCursorToPos(0)
+		t.writeLine(newLine)
+		for i := len(newLine); i < len(t.line); i++ {
+			t.writeLine(space)
+		}
+		t.moveCursorToPos(newPos)
+	}
+	t.line = newLine
+	t.pos = newPos
+}
+
+func (t *Terminal) advanceCursor(places int) {
+	t.cursorX += places
+	t.cursorY += t.cursorX / t.termWidth
+	if t.cursorY > t.maxLine {
+		t.maxLine = t.cursorY
+	}
+	t.cursorX = t.cursorX % t.termWidth
+
+	if places > 0 && t.cursorX == 0 {
+		// Normally terminals will advance the current position
+		// when writing a character. But that doesn't happen
+		// for the last character in a line. However, when
+		// writing a character (except a new line) that causes
+		// a line wrap, the position will be advanced two
+		// places.
+		//
+		// So, if we are stopping at the end of a line, we
+		// need to write a newline so that our cursor can be
+		// advanced to the next line.
+		t.outBuf = append(t.outBuf, '\r', '\n')
+	}
+}
+
+func (t *Terminal) eraseNPreviousChars(n int) {
+	if n == 0 {
+		return
+	}
+
+	if t.pos < n {
+		n = t.pos
+	}
+	t.pos -= n
+	t.moveCursorToPos(t.pos)
+
+	copy(t.line[t.pos:], t.line[n+t.pos:])
+	t.line = t.line[:len(t.line)-n]
+	if t.echo {
+		t.writeLine(t.line[t.pos:])
+		for i := 0; i < n; i++ {
+			t.queue(space)
+		}
+		t.advanceCursor(n)
+		t.moveCursorToPos(t.pos)
+	}
+}
+
+// countToLeftWord returns then number of characters from the cursor to the
+// start of the previous word.
+func (t *Terminal) countToLeftWord() int {
+	if t.pos == 0 {
+		return 0
+	}
+
+	pos := t.pos - 1
+	for pos > 0 {
+		if t.line[pos] != ' ' {
+			break
+		}
+		pos--
+	}
+	for pos > 0 {
+		if t.line[pos] == ' ' {
+			pos++
+			break
+		}
+		pos--
+	}
+
+	return t.pos - pos
+}
+
+// countToRightWord returns then number of characters from the cursor to the
+// start of the next word.
+func (t *Terminal) countToRightWord() int {
+	pos := t.pos
+	for pos < len(t.line) {
+		if t.line[pos] == ' ' {
+			break
+		}
+		pos++
+	}
+	for pos < len(t.line) {
+		if t.line[pos] != ' ' {
+			break
+		}
+		pos++
+	}
+	return pos - t.pos
+}
+
+// visualLength returns the number of visible glyphs in s.
+func visualLength(runes []rune) int {
+	inEscapeSeq := false
+	length := 0
+
+	for _, r := range runes {
+		switch {
+		case inEscapeSeq:
+			if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') {
+				inEscapeSeq = false
+			}
+		case r == '\x1b':
+			inEscapeSeq = true
+		default:
+			length++
+		}
+	}
+
+	return length
+}
+
+// handleKey processes the given key and, optionally, returns a line of text
+// that the user has entered.
+func (t *Terminal) handleKey(key rune) (line string, ok bool) {
+	if t.pasteActive && key != keyEnter {
+		t.addKeyToLine(key)
+		return
+	}
+
+	switch key {
+	case keyBackspace:
+		if t.pos == 0 {
+			return
+		}
+		t.eraseNPreviousChars(1)
+	case keyAltLeft:
+		// move left by a word.
+		t.pos -= t.countToLeftWord()
+		t.moveCursorToPos(t.pos)
+	case keyAltRight:
+		// move right by a word.
+		t.pos += t.countToRightWord()
+		t.moveCursorToPos(t.pos)
+	case keyLeft:
+		if t.pos == 0 {
+			return
+		}
+		t.pos--
+		t.moveCursorToPos(t.pos)
+	case keyRight:
+		if t.pos == len(t.line) {
+			return
+		}
+		t.pos++
+		t.moveCursorToPos(t.pos)
+	case keyHome:
+		if t.pos == 0 {
+			return
+		}
+		t.pos = 0
+		t.moveCursorToPos(t.pos)
+	case keyEnd:
+		if t.pos == len(t.line) {
+			return
+		}
+		t.pos = len(t.line)
+		t.moveCursorToPos(t.pos)
+	case keyUp:
+		entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1)
+		if !ok {
+			return "", false
+		}
+		if t.historyIndex == -1 {
+			t.historyPending = string(t.line)
+		}
+		t.historyIndex++
+		runes := []rune(entry)
+		t.setLine(runes, len(runes))
+	case keyDown:
+		switch t.historyIndex {
+		case -1:
+			return
+		case 0:
+			runes := []rune(t.historyPending)
+			t.setLine(runes, len(runes))
+			t.historyIndex--
+		default:
+			entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1)
+			if ok {
+				t.historyIndex--
+				runes := []rune(entry)
+				t.setLine(runes, len(runes))
+			}
+		}
+	case keyEnter:
+		t.moveCursorToPos(len(t.line))
+		t.queue([]rune("\r\n"))
+		line = string(t.line)
+		ok = true
+		t.line = t.line[:0]
+		t.pos = 0
+		t.cursorX = 0
+		t.cursorY = 0
+		t.maxLine = 0
+	case keyDeleteWord:
+		// Delete zero or more spaces and then one or more characters.
+		t.eraseNPreviousChars(t.countToLeftWord())
+	case keyDeleteLine:
+		// Delete everything from the current cursor position to the
+		// end of line.
+		for i := t.pos; i < len(t.line); i++ {
+			t.queue(space)
+			t.advanceCursor(1)
+		}
+		t.line = t.line[:t.pos]
+		t.moveCursorToPos(t.pos)
+	case keyCtrlD:
+		// Erase the character under the current position.
+		// The EOF case when the line is empty is handled in
+		// readLine().
+		if t.pos < len(t.line) {
+			t.pos++
+			t.eraseNPreviousChars(1)
+		}
+	case keyCtrlU:
+		t.eraseNPreviousChars(t.pos)
+	case keyClearScreen:
+		// Erases the screen and moves the cursor to the home position.
+		t.queue([]rune("\x1b[2J\x1b[H"))
+		t.queue(t.prompt)
+		t.cursorX, t.cursorY = 0, 0
+		t.advanceCursor(visualLength(t.prompt))
+		t.setLine(t.line, t.pos)
+	default:
+		if t.AutoCompleteCallback != nil {
+			prefix := string(t.line[:t.pos])
+			suffix := string(t.line[t.pos:])
+
+			t.lock.Unlock()
+			newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key)
+			t.lock.Lock()
+
+			if completeOk {
+				t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos]))
+				return
+			}
+		}
+		if !isPrintable(key) {
+			return
+		}
+		if len(t.line) == maxLineLength {
+			return
+		}
+		t.addKeyToLine(key)
+	}
+	return
+}
+
+// addKeyToLine inserts the given key at the current position in the current
+// line.
+func (t *Terminal) addKeyToLine(key rune) {
+	if len(t.line) == cap(t.line) {
+		newLine := make([]rune, len(t.line), 2*(1+len(t.line)))
+		copy(newLine, t.line)
+		t.line = newLine
+	}
+	t.line = t.line[:len(t.line)+1]
+	copy(t.line[t.pos+1:], t.line[t.pos:])
+	t.line[t.pos] = key
+	if t.echo {
+		t.writeLine(t.line[t.pos:])
+	}
+	t.pos++
+	t.moveCursorToPos(t.pos)
+}
+
+func (t *Terminal) writeLine(line []rune) {
+	for len(line) != 0 {
+		remainingOnLine := t.termWidth - t.cursorX
+		todo := len(line)
+		if todo > remainingOnLine {
+			todo = remainingOnLine
+		}
+		t.queue(line[:todo])
+		t.advanceCursor(visualLength(line[:todo]))
+		line = line[todo:]
+	}
+}
+
+// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n.
+func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) {
+	for len(buf) > 0 {
+		i := bytes.IndexByte(buf, '\n')
+		todo := len(buf)
+		if i >= 0 {
+			todo = i
+		}
+
+		var nn int
+		nn, err = w.Write(buf[:todo])
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		buf = buf[todo:]
+
+		if i >= 0 {
+			if _, err = w.Write(crlf); err != nil {
+				return n, err
+			}
+			n += 1
+			buf = buf[1:]
+		}
+	}
+
+	return n, nil
+}
+
+func (t *Terminal) Write(buf []byte) (n int, err error) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	if t.cursorX == 0 && t.cursorY == 0 {
+		// This is the easy case: there's nothing on the screen that we
+		// have to move out of the way.
+		return writeWithCRLF(t.c, buf)
+	}
+
+	// We have a prompt and possibly user input on the screen. We
+	// have to clear it first.
+	t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */)
+	t.cursorX = 0
+	t.clearLineToRight()
+
+	for t.cursorY > 0 {
+		t.move(1 /* up */, 0, 0, 0)
+		t.cursorY--
+		t.clearLineToRight()
+	}
+
+	if _, err = t.c.Write(t.outBuf); err != nil {
+		return
+	}
+	t.outBuf = t.outBuf[:0]
+
+	if n, err = writeWithCRLF(t.c, buf); err != nil {
+		return
+	}
+
+	t.writeLine(t.prompt)
+	if t.echo {
+		t.writeLine(t.line)
+	}
+
+	t.moveCursorToPos(t.pos)
+
+	if _, err = t.c.Write(t.outBuf); err != nil {
+		return
+	}
+	t.outBuf = t.outBuf[:0]
+	return
+}
+
+// ReadPassword temporarily changes the prompt and reads a password, without
+// echo, from the terminal.
+func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	oldPrompt := t.prompt
+	t.prompt = []rune(prompt)
+	t.echo = false
+
+	line, err = t.readLine()
+
+	t.prompt = oldPrompt
+	t.echo = true
+
+	return
+}
+
+// ReadLine returns a line of input from the terminal.
+func (t *Terminal) ReadLine() (line string, err error) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	return t.readLine()
+}
+
+func (t *Terminal) readLine() (line string, err error) {
+	// t.lock must be held at this point
+
+	if t.cursorX == 0 && t.cursorY == 0 {
+		t.writeLine(t.prompt)
+		t.c.Write(t.outBuf)
+		t.outBuf = t.outBuf[:0]
+	}
+
+	lineIsPasted := t.pasteActive
+
+	for {
+		rest := t.remainder
+		lineOk := false
+		for !lineOk {
+			var key rune
+			key, rest = bytesToKey(rest, t.pasteActive)
+			if key == utf8.RuneError {
+				break
+			}
+			if !t.pasteActive {
+				if key == keyCtrlD {
+					if len(t.line) == 0 {
+						return "", io.EOF
+					}
+				}
+				if key == keyPasteStart {
+					t.pasteActive = true
+					if len(t.line) == 0 {
+						lineIsPasted = true
+					}
+					continue
+				}
+			} else if key == keyPasteEnd {
+				t.pasteActive = false
+				continue
+			}
+			if !t.pasteActive {
+				lineIsPasted = false
+			}
+			line, lineOk = t.handleKey(key)
+		}
+		if len(rest) > 0 {
+			n := copy(t.inBuf[:], rest)
+			t.remainder = t.inBuf[:n]
+		} else {
+			t.remainder = nil
+		}
+		t.c.Write(t.outBuf)
+		t.outBuf = t.outBuf[:0]
+		if lineOk {
+			if t.echo {
+				t.historyIndex = -1
+				t.history.Add(line)
+			}
+			if lineIsPasted {
+				err = ErrPasteIndicator
+			}
+			return
+		}
+
+		// t.remainder is a slice at the beginning of t.inBuf
+		// containing a partial key sequence
+		readBuf := t.inBuf[len(t.remainder):]
+		var n int
+
+		t.lock.Unlock()
+		n, err = t.c.Read(readBuf)
+		t.lock.Lock()
+
+		if err != nil {
+			return
+		}
+
+		t.remainder = t.inBuf[:n+len(t.remainder)]
+	}
+}
+
+// SetPrompt sets the prompt to be used when reading subsequent lines.
+func (t *Terminal) SetPrompt(prompt string) {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	t.prompt = []rune(prompt)
+}
+
+func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) {
+	// Move cursor to column zero at the start of the line.
+	t.move(t.cursorY, 0, t.cursorX, 0)
+	t.cursorX, t.cursorY = 0, 0
+	t.clearLineToRight()
+	for t.cursorY < numPrevLines {
+		// Move down a line
+		t.move(0, 1, 0, 0)
+		t.cursorY++
+		t.clearLineToRight()
+	}
+	// Move back to beginning.
+	t.move(t.cursorY, 0, 0, 0)
+	t.cursorX, t.cursorY = 0, 0
+
+	t.queue(t.prompt)
+	t.advanceCursor(visualLength(t.prompt))
+	t.writeLine(t.line)
+	t.moveCursorToPos(t.pos)
+}
+
+func (t *Terminal) SetSize(width, height int) error {
+	t.lock.Lock()
+	defer t.lock.Unlock()
+
+	if width == 0 {
+		width = 1
+	}
+
+	oldWidth := t.termWidth
+	t.termWidth, t.termHeight = width, height
+
+	switch {
+	case width == oldWidth:
+		// If the width didn't change then nothing else needs to be
+		// done.
+		return nil
+	case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0:
+		// If there is nothing on current line and no prompt printed,
+		// just do nothing
+		return nil
+	case width < oldWidth:
+		// Some terminals (e.g. xterm) will truncate lines that were
+		// too long when shrinking. Others (e.g. gnome-terminal) will
+		// attempt to wrap them. For the former, repainting t.maxLine
+		// works great, but that behaviour goes badly wrong in the case
+		// of the latter because they have doubled every full line.
+
+		// We assume that we are working on a terminal that wraps lines
+		// and adjust the cursor position based on every previous line
+		// wrapping and turning into two. This causes the prompt on
+		// xterms to move upwards, which isn't great, but it avoids a
+		// huge mess with gnome-terminal.
+		if t.cursorX >= t.termWidth {
+			t.cursorX = t.termWidth - 1
+		}
+		t.cursorY *= 2
+		t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2)
+	case width > oldWidth:
+		// If the terminal expands then our position calculations will
+		// be wrong in the future because we think the cursor is
+		// |t.pos| chars into the string, but there will be a gap at
+		// the end of any wrapped line.
+		//
+		// But the position will actually be correct until we move, so
+		// we can move back to the beginning and repaint everything.
+		t.clearAndRepaintLinePlusNPrevious(t.maxLine)
+	}
+
+	_, err := t.c.Write(t.outBuf)
+	t.outBuf = t.outBuf[:0]
+	return err
+}
+
+type pasteIndicatorError struct{}
+
+func (pasteIndicatorError) Error() string {
+	return "terminal: ErrPasteIndicator not correctly handled"
+}
+
+// ErrPasteIndicator may be returned from ReadLine as the error, in addition
+// to valid line data. It indicates that bracketed paste mode is enabled and
+// that the returned line consists only of pasted data. Programs may wish to
+// interpret pasted data more literally than typed data.
+var ErrPasteIndicator = pasteIndicatorError{}
+
+// SetBracketedPasteMode requests that the terminal bracket paste operations
+// with markers. Not all terminals support this but, if it is supported, then
+// enabling this mode will stop any autocomplete callback from running due to
+// pastes. Additionally, any lines that are completely pasted will be returned
+// from ReadLine with the error set to ErrPasteIndicator.
+func (t *Terminal) SetBracketedPasteMode(on bool) {
+	if on {
+		io.WriteString(t.c, "\x1b[?2004h")
+	} else {
+		io.WriteString(t.c, "\x1b[?2004l")
+	}
+}
+
+// stRingBuffer is a ring buffer of strings.
+type stRingBuffer struct {
+	// entries contains max elements.
+	entries []string
+	max     int
+	// head contains the index of the element most recently added to the ring.
+	head int
+	// size contains the number of elements in the ring.
+	size int
+}
+
+func (s *stRingBuffer) Add(a string) {
+	if s.entries == nil {
+		const defaultNumEntries = 100
+		s.entries = make([]string, defaultNumEntries)
+		s.max = defaultNumEntries
+	}
+
+	s.head = (s.head + 1) % s.max
+	s.entries[s.head] = a
+	if s.size < s.max {
+		s.size++
+	}
+}
+
+// NthPreviousEntry returns the value passed to the nth previous call to Add.
+// If n is zero then the immediately prior value is returned, if one, then the
+// next most recent, and so on. If such an element doesn't exist then ok is
+// false.
+func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) {
+	if n >= s.size {
+		return "", false
+	}
+	index := s.head - n
+	if index < 0 {
+		index += s.max
+	}
+	return s.entries[index], true
+}
+
+// readPasswordLine reads from reader until it finds \n or io.EOF.
+// The slice returned does not include the \n.
+// readPasswordLine also ignores any \r it finds.
+func readPasswordLine(reader io.Reader) ([]byte, error) {
+	var buf [1]byte
+	var ret []byte
+
+	for {
+		n, err := reader.Read(buf[:])
+		if n > 0 {
+			switch buf[0] {
+			case '\n':
+				return ret, nil
+			case '\r':
+				// remove \r from passwords on Windows
+			default:
+				ret = append(ret, buf[0])
+			}
+			continue
+		}
+		if err != nil {
+			if err == io.EOF && len(ret) > 0 {
+				return ret, nil
+			}
+			return ret, err
+		}
+	}
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go
new file mode 100644
index 0000000..d019196
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go
@@ -0,0 +1,119 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux,!appengine netbsd openbsd
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Putting a terminal into raw mode is the most common requirement:
+//
+// 	oldState, err := terminal.MakeRaw(0)
+// 	if err != nil {
+// 	        panic(err)
+// 	}
+// 	defer terminal.Restore(0, oldState)
+package terminal // import "golang.org/x/crypto/ssh/terminal"
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// State contains the state of a terminal.
+type State struct {
+	termios syscall.Termios
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+	var oldState State
+	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState.termios
+	// This attempts to replicate the behaviour documented for cfmakeraw in
+	// the termios(3) manpage.
+	newState.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
+	newState.Oflag &^= syscall.OPOST
+	newState.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
+	newState.Cflag &^= syscall.CSIZE | syscall.PARENB
+	newState.Cflag |= syscall.CS8
+	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+	var oldState State
+	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState.termios)), 0, 0, 0); err != 0 {
+		return nil, err
+	}
+
+	return &oldState, nil
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func Restore(fd int, state *State) error {
+	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0); err != 0 {
+		return err
+	}
+	return nil
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+	var dimensions [4]uint16
+
+	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(&dimensions)), 0, 0, 0); err != 0 {
+		return -1, -1, err
+	}
+	return int(dimensions[1]), int(dimensions[0]), nil
+}
+
+// passwordReader is an io.Reader that reads from a specific file descriptor.
+type passwordReader int
+
+func (r passwordReader) Read(buf []byte) (int, error) {
+	return syscall.Read(int(r), buf)
+}
+
+// ReadPassword reads a line of input from a terminal without local echo.  This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+	var oldState syscall.Termios
+	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0); err != 0 {
+		return nil, err
+	}
+
+	newState := oldState
+	newState.Lflag &^= syscall.ECHO
+	newState.Lflag |= syscall.ICANON | syscall.ISIG
+	newState.Iflag |= syscall.ICRNL
+	if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&newState)), 0, 0, 0); err != 0 {
+		return nil, err
+	}
+
+	defer func() {
+		syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&oldState)), 0, 0, 0)
+	}()
+
+	return readPasswordLine(passwordReader(fd))
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
new file mode 100644
index 0000000..cb23a59
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go
@@ -0,0 +1,12 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package terminal
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TIOCGETA
+const ioctlWriteTermios = unix.TIOCSETA
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
new file mode 100644
index 0000000..5fadfe8
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go
@@ -0,0 +1,10 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package terminal
+
+import "golang.org/x/sys/unix"
+
+const ioctlReadTermios = unix.TCGETS
+const ioctlWriteTermios = unix.TCSETS
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
new file mode 100644
index 0000000..799f049
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go
@@ -0,0 +1,58 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Putting a terminal into raw mode is the most common requirement:
+//
+// 	oldState, err := terminal.MakeRaw(0)
+// 	if err != nil {
+// 	        panic(err)
+// 	}
+// 	defer terminal.Restore(0, oldState)
+package terminal
+
+import (
+	"fmt"
+	"runtime"
+)
+
+type State struct{}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+	return false
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+	return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+	return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func Restore(fd int, state *State) error {
+	return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+	return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
+
+// ReadPassword reads a line of input from a terminal without local echo.  This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+	return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
new file mode 100644
index 0000000..a2e1b57
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
@@ -0,0 +1,128 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build solaris
+
+package terminal // import "golang.org/x/crypto/ssh/terminal"
+
+import (
+	"golang.org/x/sys/unix"
+	"io"
+	"syscall"
+)
+
+// State contains the state of a terminal.
+type State struct {
+	state *unix.Termios
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+	_, err := unix.IoctlGetTermio(fd, unix.TCGETA)
+	return err == nil
+}
+
+// ReadPassword reads a line of input from a terminal without local echo.  This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+	// see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c
+	val, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+	if err != nil {
+		return nil, err
+	}
+	oldState := *val
+
+	newState := oldState
+	newState.Lflag &^= syscall.ECHO
+	newState.Lflag |= syscall.ICANON | syscall.ISIG
+	newState.Iflag |= syscall.ICRNL
+	err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState)
+	if err != nil {
+		return nil, err
+	}
+
+	defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState)
+
+	var buf [16]byte
+	var ret []byte
+	for {
+		n, err := syscall.Read(fd, buf[:])
+		if err != nil {
+			return nil, err
+		}
+		if n == 0 {
+			if len(ret) == 0 {
+				return nil, io.EOF
+			}
+			break
+		}
+		if buf[n-1] == '\n' {
+			n--
+		}
+		ret = append(ret, buf[:n]...)
+		if n < len(buf) {
+			break
+		}
+	}
+
+	return ret, nil
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+// see http://cr.illumos.org/~webrev/andy_js/1060/
+func MakeRaw(fd int) (*State, error) {
+	oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+	if err != nil {
+		return nil, err
+	}
+	oldTermios := *oldTermiosPtr
+
+	newTermios := oldTermios
+	newTermios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
+	newTermios.Oflag &^= syscall.OPOST
+	newTermios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
+	newTermios.Cflag &^= syscall.CSIZE | syscall.PARENB
+	newTermios.Cflag |= syscall.CS8
+	newTermios.Cc[unix.VMIN] = 1
+	newTermios.Cc[unix.VTIME] = 0
+
+	if err := unix.IoctlSetTermios(fd, unix.TCSETS, &newTermios); err != nil {
+		return nil, err
+	}
+
+	return &State{
+		state: oldTermiosPtr,
+	}, nil
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func Restore(fd int, oldState *State) error {
+	return unix.IoctlSetTermios(fd, unix.TCSETS, oldState.state)
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+	oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS)
+	if err != nil {
+		return nil, err
+	}
+
+	return &State{
+		state: oldTermiosPtr,
+	}, nil
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+	ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
+	if err != nil {
+		return 0, 0, err
+	}
+	return int(ws.Col), int(ws.Row), nil
+}
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
new file mode 100644
index 0000000..e0a1f36
--- /dev/null
+++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
@@ -0,0 +1,155 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows
+
+// Package terminal provides support functions for dealing with terminals, as
+// commonly found on UNIX systems.
+//
+// Putting a terminal into raw mode is the most common requirement:
+//
+// 	oldState, err := terminal.MakeRaw(0)
+// 	if err != nil {
+// 	        panic(err)
+// 	}
+// 	defer terminal.Restore(0, oldState)
+package terminal
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	enableLineInput       = 2
+	enableEchoInput       = 4
+	enableProcessedInput  = 1
+	enableWindowInput     = 8
+	enableMouseInput      = 16
+	enableInsertMode      = 32
+	enableQuickEditMode   = 64
+	enableExtendedFlags   = 128
+	enableAutoPosition    = 256
+	enableProcessedOutput = 1
+	enableWrapAtEolOutput = 2
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+	procGetConsoleMode             = kernel32.NewProc("GetConsoleMode")
+	procSetConsoleMode             = kernel32.NewProc("SetConsoleMode")
+	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+)
+
+type (
+	short int16
+	word  uint16
+
+	coord struct {
+		x short
+		y short
+	}
+	smallRect struct {
+		left   short
+		top    short
+		right  short
+		bottom short
+	}
+	consoleScreenBufferInfo struct {
+		size              coord
+		cursorPosition    coord
+		attributes        word
+		window            smallRect
+		maximumWindowSize coord
+	}
+)
+
+type State struct {
+	mode uint32
+}
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(fd int) bool {
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
+
+// MakeRaw puts the terminal connected to the given file descriptor into raw
+// mode and returns the previous state of the terminal so that it can be
+// restored.
+func MakeRaw(fd int) (*State, error) {
+	var st uint32
+	_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+	if e != 0 {
+		return nil, error(e)
+	}
+	raw := st &^ (enableEchoInput | enableProcessedInput | enableLineInput | enableProcessedOutput)
+	_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(raw), 0)
+	if e != 0 {
+		return nil, error(e)
+	}
+	return &State{st}, nil
+}
+
+// GetState returns the current state of a terminal which may be useful to
+// restore the terminal after a signal.
+func GetState(fd int) (*State, error) {
+	var st uint32
+	_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+	if e != 0 {
+		return nil, error(e)
+	}
+	return &State{st}, nil
+}
+
+// Restore restores the terminal connected to the given file descriptor to a
+// previous state.
+func Restore(fd int, state *State) error {
+	_, _, err := syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(state.mode), 0)
+	return err
+}
+
+// GetSize returns the dimensions of the given terminal.
+func GetSize(fd int) (width, height int, err error) {
+	var info consoleScreenBufferInfo
+	_, _, e := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&info)), 0)
+	if e != 0 {
+		return 0, 0, error(e)
+	}
+	return int(info.size.x), int(info.size.y), nil
+}
+
+// passwordReader is an io.Reader that reads from a specific Windows HANDLE.
+type passwordReader int
+
+func (r passwordReader) Read(buf []byte) (int, error) {
+	return syscall.Read(syscall.Handle(r), buf)
+}
+
+// ReadPassword reads a line of input from a terminal without local echo.  This
+// is commonly used for inputting passwords and other sensitive data. The slice
+// returned does not include the \n.
+func ReadPassword(fd int) ([]byte, error) {
+	var st uint32
+	_, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+	if e != 0 {
+		return nil, error(e)
+	}
+	old := st
+
+	st &^= (enableEchoInput)
+	st |= (enableProcessedInput | enableLineInput | enableProcessedOutput)
+	_, _, e = syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(st), 0)
+	if e != 0 {
+		return nil, error(e)
+	}
+
+	defer func() {
+		syscall.Syscall(procSetConsoleMode.Addr(), 2, uintptr(fd), uintptr(old), 0)
+	}()
+
+	return readPasswordLine(passwordReader(fd))
+}
diff --git a/vendor/golang.org/x/sys/README b/vendor/golang.org/x/sys/README
deleted file mode 100644
index bd422b4..0000000
--- a/vendor/golang.org/x/sys/README
+++ /dev/null
@@ -1,3 +0,0 @@
-This repository holds supplemental Go packages for low-level interactions with the operating system.
-
-To submit changes to this repository, see http://golang.org/doc/contribute.html.
diff --git a/vendor/golang.org/x/sys/README.md b/vendor/golang.org/x/sys/README.md
new file mode 100644
index 0000000..ef6c9e5
--- /dev/null
+++ b/vendor/golang.org/x/sys/README.md
@@ -0,0 +1,18 @@
+# sys
+
+This repository holds supplemental Go packages for low-level interactions with
+the operating system.
+
+## Download/Install
+
+The easiest way to install is to run `go get -u golang.org/x/sys`. You can
+also manually git clone the repository to `$GOPATH/src/golang.org/x/sys`.
+
+## Report Issues / Send Patches
+
+This repository uses Gerrit for code changes. To learn how to submit changes to
+this repository, see https://golang.org/doc/contribute.html.
+
+The main issue tracker for the sys repository is located at
+https://github.com/golang/go/issues. Prefix your issue with "x/sys:" in the
+subject line, so it is easy to find.
diff --git a/vendor/golang.org/x/sys/unix/dev_darwin.go b/vendor/golang.org/x/sys/unix/dev_darwin.go
new file mode 100644
index 0000000..8d1dc0f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_darwin.go
@@ -0,0 +1,24 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in Darwin's sys/types.h header.
+
+package unix
+
+// Major returns the major component of a Darwin device number.
+func Major(dev uint64) uint32 {
+	return uint32((dev >> 24) & 0xff)
+}
+
+// Minor returns the minor component of a Darwin device number.
+func Minor(dev uint64) uint32 {
+	return uint32(dev & 0xffffff)
+}
+
+// Mkdev returns a Darwin device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+	return (uint64(major) << 24) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_dragonfly.go b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
new file mode 100644
index 0000000..8502f20
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_dragonfly.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in Dragonfly's sys/types.h header.
+//
+// The information below is extracted and adapted from sys/types.h:
+//
+// Minor gives a cookie instead of an index, in order to avoid changing the
+// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
+// devices that don't use them.
+
+package unix
+
+// Major returns the major component of a DragonFlyBSD device number.
+func Major(dev uint64) uint32 {
+	return uint32((dev >> 8) & 0xff)
+}
+
+// Minor returns the minor component of a DragonFlyBSD device number.
+func Minor(dev uint64) uint32 {
+	return uint32(dev & 0xffff00ff)
+}
+
+// Mkdev returns a DragonFlyBSD device number generated from the given major and
+// minor components.
+func Mkdev(major, minor uint32) uint64 {
+	return (uint64(major) << 8) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_freebsd.go b/vendor/golang.org/x/sys/unix/dev_freebsd.go
new file mode 100644
index 0000000..eba3b4b
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_freebsd.go
@@ -0,0 +1,30 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in FreeBSD's sys/types.h header.
+//
+// The information below is extracted and adapted from sys/types.h:
+//
+// Minor gives a cookie instead of an index, in order to avoid changing the
+// meanings of bits 0-15 or wasting time and space shifting bits 16-31 for
+// devices that don't use them.
+
+package unix
+
+// Major returns the major component of a FreeBSD device number.
+func Major(dev uint64) uint32 {
+	return uint32((dev >> 8) & 0xff)
+}
+
+// Minor returns the minor component of a FreeBSD device number.
+func Minor(dev uint64) uint32 {
+	return uint32(dev & 0xffff00ff)
+}
+
+// Mkdev returns a FreeBSD device number generated from the given major and
+// minor components.
+func Mkdev(major, minor uint32) uint64 {
+	return (uint64(major) << 8) | uint64(minor)
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_linux.go b/vendor/golang.org/x/sys/unix/dev_linux.go
index c902c39..d165d6f 100644
--- a/vendor/golang.org/x/sys/unix/dev_linux.go
+++ b/vendor/golang.org/x/sys/unix/dev_linux.go
@@ -34,9 +34,9 @@
 // Mkdev returns a Linux device number generated from the given major and minor
 // components.
 func Mkdev(major, minor uint32) uint64 {
-	dev := uint64((major & 0x00000fff) << 8)
-	dev |= uint64((major & 0xfffff000) << 32)
-	dev |= uint64((minor & 0x000000ff) << 0)
-	dev |= uint64((minor & 0xffffff00) << 12)
+	dev := (uint64(major) & 0x00000fff) << 8
+	dev |= (uint64(major) & 0xfffff000) << 32
+	dev |= (uint64(minor) & 0x000000ff) << 0
+	dev |= (uint64(minor) & 0xffffff00) << 12
 	return dev
 }
diff --git a/vendor/golang.org/x/sys/unix/dev_netbsd.go b/vendor/golang.org/x/sys/unix/dev_netbsd.go
new file mode 100644
index 0000000..b4a203d
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_netbsd.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in NetBSD's sys/types.h header.
+
+package unix
+
+// Major returns the major component of a NetBSD device number.
+func Major(dev uint64) uint32 {
+	return uint32((dev & 0x000fff00) >> 8)
+}
+
+// Minor returns the minor component of a NetBSD device number.
+func Minor(dev uint64) uint32 {
+	minor := uint32((dev & 0x000000ff) >> 0)
+	minor |= uint32((dev & 0xfff00000) >> 12)
+	return minor
+}
+
+// Mkdev returns a NetBSD device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+	dev := (uint64(major) << 8) & 0x000fff00
+	dev |= (uint64(minor) << 12) & 0xfff00000
+	dev |= (uint64(minor) << 0) & 0x000000ff
+	return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/dev_openbsd.go b/vendor/golang.org/x/sys/unix/dev_openbsd.go
new file mode 100644
index 0000000..f3430c4
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/dev_openbsd.go
@@ -0,0 +1,29 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Functions to access/create device major and minor numbers matching the
+// encoding used in OpenBSD's sys/types.h header.
+
+package unix
+
+// Major returns the major component of an OpenBSD device number.
+func Major(dev uint64) uint32 {
+	return uint32((dev & 0x0000ff00) >> 8)
+}
+
+// Minor returns the minor component of an OpenBSD device number.
+func Minor(dev uint64) uint32 {
+	minor := uint32((dev & 0x000000ff) >> 0)
+	minor |= uint32((dev & 0xffff0000) >> 8)
+	return minor
+}
+
+// Mkdev returns an OpenBSD device number generated from the given major and minor
+// components.
+func Mkdev(major, minor uint32) uint64 {
+	dev := (uint64(major) << 8) & 0x0000ff00
+	dev |= (uint64(minor) << 8) & 0xffff0000
+	dev |= (uint64(minor) << 0) & 0x000000ff
+	return dev
+}
diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go
index 45e281a..2e06b33 100644
--- a/vendor/golang.org/x/sys/unix/env_unix.go
+++ b/vendor/golang.org/x/sys/unix/env_unix.go
@@ -1,4 +1,4 @@
-// Copyright 2010 The Go Authors.  All rights reserved.
+// Copyright 2010 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/unix/env_unset.go b/vendor/golang.org/x/sys/unix/env_unset.go
index 9222262..c44fdc4 100644
--- a/vendor/golang.org/x/sys/unix/env_unset.go
+++ b/vendor/golang.org/x/sys/unix/env_unset.go
@@ -1,4 +1,4 @@
-// Copyright 2014 The Go Authors.  All rights reserved.
+// Copyright 2014 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go
index 94c8232..40bed3f 100644
--- a/vendor/golang.org/x/sys/unix/gccgo.go
+++ b/vendor/golang.org/x/sys/unix/gccgo.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors.  All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -8,7 +8,7 @@
 
 import "syscall"
 
-// We can't use the gc-syntax .s files for gccgo.  On the plus side
+// We can't use the gc-syntax .s files for gccgo. On the plus side
 // much of the functionality can be written directly in Go.
 
 //extern gccgoRealSyscall
diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c
index 07f6be0..99a774f 100644
--- a/vendor/golang.org/x/sys/unix/gccgo_c.c
+++ b/vendor/golang.org/x/sys/unix/gccgo_c.c
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors.  All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
index bffe1a7..251a977 100644
--- a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go
@@ -1,4 +1,4 @@
-// Copyright 2015 The Go Authors.  All rights reserved.
+// Copyright 2015 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go b/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go
deleted file mode 100644
index 5633269..0000000
--- a/vendor/golang.org/x/sys/unix/gccgo_linux_sparc64.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright 2016 The Go Authors.  All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build gccgo,linux,sparc64
-
-package unix
-
-import "syscall"
-
-//extern sysconf
-func realSysconf(name int) int64
-
-func sysconf(name int) (n int64, err syscall.Errno) {
-	r := realSysconf(name)
-	if r < 0 {
-		return 0, syscall.GetErrno()
-	}
-	return r, 0
-}
diff --git a/vendor/golang.org/x/sys/unix/pagesize_unix.go b/vendor/golang.org/x/sys/unix/pagesize_unix.go
new file mode 100644
index 0000000..83c85e0
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/pagesize_unix.go
@@ -0,0 +1,15 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+// For Unix, get the pagesize from the runtime.
+
+package unix
+
+import "syscall"
+
+func Getpagesize() int {
+	return syscall.Getpagesize()
+}
diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/golang.org/x/sys/unix/race.go
index 3c7627e..61712b5 100644
--- a/vendor/golang.org/x/sys/unix/race.go
+++ b/vendor/golang.org/x/sys/unix/race.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors.  All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/golang.org/x/sys/unix/race0.go
index f8678e0..dd08204 100644
--- a/vendor/golang.org/x/sys/unix/race0.go
+++ b/vendor/golang.org/x/sys/unix/race0.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors.  All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go
index d9ff473..6079eb4 100644
--- a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go
+++ b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors.  All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go
index 85e3502..857d2a4 100644
--- a/vendor/golang.org/x/sys/unix/syscall.go
+++ b/vendor/golang.org/x/sys/unix/syscall.go
@@ -5,10 +5,10 @@
 // +build darwin dragonfly freebsd linux netbsd openbsd solaris
 
 // Package unix contains an interface to the low-level operating system
-// primitives.  OS details vary depending on the underlying system, and
+// primitives. OS details vary depending on the underlying system, and
 // by default, godoc will display OS-specific documentation for the current
-// system.  If you want godoc to display OS documentation for another
-// system, set $GOOS and $GOARCH to the desired system.  For example, if
+// system. If you want godoc to display OS documentation for another
+// system, set $GOOS and $GOARCH to the desired system. For example, if
 // you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
 // to freebsd and $GOARCH to arm.
 // The primary use of this package is inside other packages that provide a more
@@ -49,21 +49,3 @@
 // Single-word zero for use when we need a valid pointer to 0 bytes.
 // See mkunix.pl.
 var _zero uintptr
-
-func (ts *Timespec) Unix() (sec int64, nsec int64) {
-	return int64(ts.Sec), int64(ts.Nsec)
-}
-
-func (tv *Timeval) Unix() (sec int64, nsec int64) {
-	return int64(tv.Sec), int64(tv.Usec) * 1000
-}
-
-func (ts *Timespec) Nano() int64 {
-	return int64(ts.Sec)*1e9 + int64(ts.Nsec)
-}
-
-func (tv *Timeval) Nano() int64 {
-	return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
-}
-
-func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go
index c2846b3..4119edd 100644
--- a/vendor/golang.org/x/sys/unix/syscall_bsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go
@@ -34,7 +34,7 @@
 		return nil, nil
 	}
 
-	// Sanity check group count.  Max is 16 on BSD.
+	// Sanity check group count. Max is 16 on BSD.
 	if n < 0 || n > 1000 {
 		return nil, EINVAL
 	}
@@ -607,6 +607,15 @@
 
 //sys	fcntl(fd int, cmd int, arg int) (val int, err error)
 
+//sys   poll(fds *PollFd, nfds int, timeout int) (n int, err error)
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+	if len(fds) == 0 {
+		return poll(nil, 0, timeout)
+	}
+	return poll(&fds[0], len(fds), timeout)
+}
+
 // TODO: wrap
 //	Acct(name nil-string) (err error)
 //	Gethostuuid(uuid *byte, timeout *Timespec) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index ad74a11..f6a8fcc 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -54,7 +54,7 @@
 
 	// NOTE(rsc): It seems strange to set the buffer to have
 	// size CTL_MAXNAME+2 but use only CTL_MAXNAME
-	// as the size.  I don't know why the +2 is here, but the
+	// as the size. I don't know why the +2 is here, but the
 	// kernel uses +2 for its own implementation of this function.
 	// I am scared that if we don't include the +2 here, the kernel
 	// will silently write 2 words farther than we specify
@@ -377,7 +377,6 @@
 // Searchfs
 // Delete
 // Copyfile
-// Poll
 // Watchevent
 // Waitevent
 // Modwatch
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
index c172a3d..b3ac109 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go
@@ -11,27 +11,18 @@
 	"unsafe"
 )
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = int32(nsec / 1e9)
-	ts.Nsec = int32(nsec % 1e9)
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: int32(sec), Nsec: int32(nsec)}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	tv.Sec = int32(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: int32(sec), Usec: int32(usec)}
 }
 
 //sysnb	gettimeofday(tp *Timeval) (sec int32, usec int32, err error)
 func Gettimeofday(tv *Timeval) (err error) {
 	// The tv passed to gettimeofday must be non-nil
-	// but is otherwise unused.  The answers come back
+	// but is otherwise unused. The answers come back
 	// in the two registers.
 	sec, usec, err := gettimeofday(tv)
 	tv.Sec = int32(sec)
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
index c6c99c1..7521944 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go
@@ -11,27 +11,18 @@
 	"unsafe"
 )
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = nsec % 1e9
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	tv.Sec = int64(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: int32(usec)}
 }
 
 //sysnb	gettimeofday(tp *Timeval) (sec int64, usec int32, err error)
 func Gettimeofday(tv *Timeval) (err error) {
 	// The tv passed to gettimeofday must be non-nil
-	// but is otherwise unused.  The answers come back
+	// but is otherwise unused. The answers come back
 	// in the two registers.
 	sec, usec, err := gettimeofday(tv)
 	tv.Sec = sec
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
index d286cf4..47ab664 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go
@@ -9,27 +9,18 @@
 	"unsafe"
 )
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = int32(nsec / 1e9)
-	ts.Nsec = int32(nsec % 1e9)
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: int32(sec), Nsec: int32(nsec)}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	tv.Sec = int32(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: int32(sec), Usec: int32(usec)}
 }
 
 //sysnb	gettimeofday(tp *Timeval) (sec int32, usec int32, err error)
 func Gettimeofday(tv *Timeval) (err error) {
 	// The tv passed to gettimeofday must be non-nil
-	// but is otherwise unused.  The answers come back
+	// but is otherwise unused. The answers come back
 	// in the two registers.
 	sec, usec, err := gettimeofday(tv)
 	tv.Sec = int32(sec)
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
index c33905c..d6d9628 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go
@@ -11,27 +11,18 @@
 	"unsafe"
 )
 
-func Getpagesize() int { return 16384 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = nsec % 1e9
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	tv.Sec = int64(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: int32(usec)}
 }
 
 //sysnb	gettimeofday(tp *Timeval) (sec int64, usec int32, err error)
 func Gettimeofday(tv *Timeval) (err error) {
 	// The tv passed to gettimeofday must be non-nil
-	// but is otherwise unused.  The answers come back
+	// but is otherwise unused. The answers come back
 	// in the two registers.
 	sec, usec, err := gettimeofday(tv)
 	tv.Sec = sec
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
index 3a48337..fee0683 100644
--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go
@@ -257,7 +257,6 @@
 // Searchfs
 // Delete
 // Copyfile
-// Poll
 // Watchevent
 // Waitevent
 // Modwatch
@@ -403,7 +402,6 @@
 // Pread_nocancel
 // Pwrite_nocancel
 // Waitid_nocancel
-// Poll_nocancel
 // Msgsnd_nocancel
 // Msgrcv_nocancel
 // Sem_wait_nocancel
diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
index da7cb79..9babb31 100644
--- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go
@@ -11,21 +11,12 @@
 	"unsafe"
 )
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = nsec % 1e9
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = nsec % 1e9 / 1e3
-	tv.Sec = int64(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
 }
 
 func SetKevent(k *Kevent_t, fd, mode, flags int) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
index d26e52e..8f7ab16 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go
@@ -32,7 +32,7 @@
 
 	// NOTE(rsc): It seems strange to set the buffer to have
 	// size CTL_MAXNAME+2 but use only CTL_MAXNAME
-	// as the size.  I don't know why the +2 is here, but the
+	// as the size. I don't know why the +2 is here, but the
 	// kernel uses +2 for its own implementation of this function.
 	// I am scared that if we don't include the +2 here, the kernel
 	// will silently write 2 words farther than we specify
@@ -550,7 +550,6 @@
 // Searchfs
 // Delete
 // Copyfile
-// Poll
 // Watchevent
 // Waitevent
 // Modwatch
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
index 6a0cd80..21e0395 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go
@@ -11,21 +11,12 @@
 	"unsafe"
 )
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = int32(nsec / 1e9)
-	ts.Nsec = int32(nsec % 1e9)
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: int32(sec), Nsec: int32(nsec)}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	tv.Sec = int32(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: int32(sec), Usec: int32(usec)}
 }
 
 func SetKevent(k *Kevent_t, fd, mode, flags int) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
index e142540..9c945a6 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go
@@ -11,21 +11,12 @@
 	"unsafe"
 )
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = nsec % 1e9
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = nsec % 1e9 / 1e3
-	tv.Sec = int64(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
 }
 
 func SetKevent(k *Kevent_t, fd, mode, flags int) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
index 5504cb1..5cd6243 100644
--- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go
@@ -11,21 +11,12 @@
 	"unsafe"
 )
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return ts.Sec*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = int32(nsec % 1e9)
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: int32(nsec)}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	tv.Sec = nsec / 1e9
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: int32(usec)}
 }
 
 func SetKevent(k *Kevent_t, fd, mode, flags int) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 2afe62b..b98a7e1 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -255,7 +255,7 @@
 		return nil, nil
 	}
 
-	// Sanity check group count.  Max is 1<<16 on Linux.
+	// Sanity check group count. Max is 1<<16 on Linux.
 	if n < 0 || n > 1<<20 {
 		return nil, EINVAL
 	}
@@ -290,8 +290,8 @@
 // 0x7F (stopped), or a signal number that caused an exit.
 // The 0x80 bit is whether there was a core dump.
 // An extra number (exit code, signal causing a stop)
-// is in the high bits.  At least that's the idea.
-// There are various irregularities.  For example, the
+// is in the high bits. At least that's the idea.
+// There are various irregularities. For example, the
 // "continued" status is 0xFFFF, distinguishing itself
 // from stopped via the core dump bit.
 
@@ -926,17 +926,22 @@
 	msg.Namelen = uint32(SizeofSockaddrAny)
 	var iov Iovec
 	if len(p) > 0 {
-		iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+		iov.Base = &p[0]
 		iov.SetLen(len(p))
 	}
 	var dummy byte
 	if len(oob) > 0 {
+		var sockType int
+		sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
+		if err != nil {
+			return
+		}
 		// receive at least one normal byte
-		if len(p) == 0 {
+		if sockType != SOCK_DGRAM && len(p) == 0 {
 			iov.Base = &dummy
 			iov.SetLen(1)
 		}
-		msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+		msg.Control = &oob[0]
 		msg.SetControllen(len(oob))
 	}
 	msg.Iov = &iov
@@ -969,21 +974,26 @@
 		}
 	}
 	var msg Msghdr
-	msg.Name = (*byte)(unsafe.Pointer(ptr))
+	msg.Name = (*byte)(ptr)
 	msg.Namelen = uint32(salen)
 	var iov Iovec
 	if len(p) > 0 {
-		iov.Base = (*byte)(unsafe.Pointer(&p[0]))
+		iov.Base = &p[0]
 		iov.SetLen(len(p))
 	}
 	var dummy byte
 	if len(oob) > 0 {
+		var sockType int
+		sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE)
+		if err != nil {
+			return 0, err
+		}
 		// send at least one normal byte
-		if len(p) == 0 {
+		if sockType != SOCK_DGRAM && len(p) == 0 {
 			iov.Base = &dummy
 			iov.SetLen(1)
 		}
-		msg.Control = (*byte)(unsafe.Pointer(&oob[0]))
+		msg.Control = &oob[0]
 		msg.SetControllen(len(oob))
 	}
 	msg.Iov = &iov
@@ -1013,7 +1023,7 @@
 
 	var buf [sizeofPtr]byte
 
-	// Leading edge.  PEEKTEXT/PEEKDATA don't require aligned
+	// Leading edge. PEEKTEXT/PEEKDATA don't require aligned
 	// access (PEEKUSER warns that it might), but if we don't
 	// align our reads, we might straddle an unmapped page
 	// boundary and not get the bytes leading up to the page
@@ -1252,6 +1262,7 @@
 //sys	PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT
 //sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64
 //sys   Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error)
+//sys	Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) = SYS_PSELECT6
 //sys	read(fd int, p []byte) (n int, err error)
 //sys	Removexattr(path string, attr string) (err error)
 //sys	Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
index 2b881b9..4774fa3 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go
@@ -14,21 +14,12 @@
 	"unsafe"
 )
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = int32(nsec / 1e9)
-	ts.Nsec = int32(nsec % 1e9)
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: int32(sec), Nsec: int32(nsec)}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Sec = int32(nsec / 1e9)
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: int32(sec), Usec: int32(usec)}
 }
 
 //sysnb	pipe(p *[2]_C_int) (err error)
@@ -185,9 +176,9 @@
 
 // On x86 Linux, all the socket calls go through an extra indirection,
 // I think because the 5-register system call interface can't handle
-// the 6-argument calls like sendto and recvfrom.  Instead the
+// the 6-argument calls like sendto and recvfrom. Instead the
 // arguments to the underlying system call are the number below
-// and a pointer to an array of uintptr.  We hide the pointer in the
+// and a pointer to an array of uintptr. We hide the pointer in the
 // socketcall assembly to avoid allocation on every system call.
 
 const (
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
index 9516a3f..3707f6b 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go
@@ -69,8 +69,6 @@
 	return nil
 }
 
-func Getpagesize() int { return 4096 }
-
 func Time(t *Time_t) (tt Time_t, err error) {
 	var tv Timeval
 	errno := gettimeofday(&tv)
@@ -85,19 +83,12 @@
 
 //sys	Utime(path string, buf *Utimbuf) (err error)
 
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = nsec % 1e9
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Sec = nsec / 1e9
-	tv.Usec = nsec % 1e9 / 1e3
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
 }
 
 //sysnb	pipe(p *[2]_C_int) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
index 71d8702..226be10 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go
@@ -11,21 +11,12 @@
 	"unsafe"
 )
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = int32(nsec / 1e9)
-	ts.Nsec = int32(nsec % 1e9)
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: int32(sec), Nsec: int32(nsec)}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Sec = int32(nsec / 1e9)
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: int32(sec), Usec: int32(usec)}
 }
 
 func Pipe(p []int) (err error) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index 4a13639..9a8e6e4 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -21,7 +21,12 @@
 //sys	Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
 //sys	Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys	Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
-//sys	Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6
+
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+	ts := Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
+	return Pselect(nfd, r, w, e, &ts, nil)
+}
+
 //sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
 //sys	Setfsgid(gid int) (err error)
 //sys	Setfsuid(uid int) (err error)
@@ -66,23 +71,14 @@
 //sys	sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
 //sys	mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
 
-func Getpagesize() int { return 65536 }
-
 //sysnb	Gettimeofday(tv *Timeval) (err error)
 
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = nsec % 1e9
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Sec = nsec / 1e9
-	tv.Usec = nsec % 1e9 / 1e3
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
 }
 
 func Time(t *Time_t) (Time_t, error) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
index 73318e5..cdda11a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
@@ -23,7 +23,12 @@
 //sys	Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
 //sys	Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys	Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
-//sys	Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS_PSELECT6
+
+func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
+	ts := Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000}
+	return Pselect(nfd, r, w, e, &ts, nil)
+}
+
 //sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
 //sys	Setfsgid(gid int) (err error)
 //sys	Setfsuid(uid int) (err error)
@@ -55,8 +60,6 @@
 //sys	sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
 //sys	mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
 
-func Getpagesize() int { return 65536 }
-
 //sysnb	Gettimeofday(tv *Timeval) (err error)
 
 func Time(t *Time_t) (tt Time_t, err error) {
@@ -73,19 +76,12 @@
 
 //sys	Utime(path string, buf *Utimbuf) (err error)
 
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = nsec % 1e9
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Sec = nsec / 1e9
-	tv.Usec = nsec % 1e9 / 1e3
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
 }
 
 func Pipe(p []int) (err error) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
index b83d93f..a114ba8 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
@@ -99,19 +99,12 @@
 	return
 }
 
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = int32(nsec / 1e9)
-	ts.Nsec = int32(nsec % 1e9)
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: int32(sec), Nsec: int32(nsec)}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Sec = int32(nsec / 1e9)
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: int32(sec), Usec: int32(usec)}
 }
 
 //sysnb pipe2(p *[2]_C_int, flags int) (err error)
@@ -235,5 +228,3 @@
 	}
 	return poll(&fds[0], len(fds), timeout)
 }
-
-func Getpagesize() int { return 4096 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
index 60770f6..7cae936 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
@@ -28,7 +28,7 @@
 //sys	Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64
 //sys	Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64
 //sys	Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK
-//sys	Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
+//sys	Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) = SYS__NEWSELECT
 //sys	sendfile(outfd int, infd int, offset *int64, count int) (written int, err error)
 //sys	Setfsgid(gid int) (err error)
 //sys	Setfsuid(uid int) (err error)
@@ -61,26 +61,17 @@
 //sys	sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
 //sys	mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
 
-func Getpagesize() int { return 65536 }
-
 //sysnb	Gettimeofday(tv *Timeval) (err error)
 //sysnb	Time(t *Time_t) (tt Time_t, err error)
 
 //sys	Utime(path string, buf *Utimbuf) (err error)
 
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = nsec % 1e9
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Sec = nsec / 1e9
-	tv.Usec = nsec % 1e9 / 1e3
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
 }
 
 func (r *PtraceRegs) PC() uint64 { return r.Nip }
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
index 1708a4b..e96a40c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
@@ -46,8 +46,6 @@
 //sysnb	getgroups(n int, list *_Gid_t) (nn int, err error)
 //sysnb	setgroups(n int, list *_Gid_t) (err error)
 
-func Getpagesize() int { return 4096 }
-
 //sysnb	Gettimeofday(tv *Timeval) (err error)
 
 func Time(t *Time_t) (tt Time_t, err error) {
@@ -64,19 +62,12 @@
 
 //sys	Utime(path string, buf *Utimbuf) (err error)
 
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = nsec % 1e9
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Sec = nsec / 1e9
-	tv.Usec = nsec % 1e9 / 1e3
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
 }
 
 //sysnb pipe2(p *[2]_C_int, flags int) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
index 20b7454..012a328 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
@@ -6,11 +6,6 @@
 
 package unix
 
-import (
-	"sync/atomic"
-	"syscall"
-)
-
 //sys	EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error)
 //sys	Dup2(oldfd int, newfd int) (err error)
 //sys	Fchown(fd int, uid int, gid int) (err error)
@@ -63,21 +58,6 @@
 //sys	sendmsg(s int, msg *Msghdr, flags int) (n int, err error)
 //sys	mmap(addr uintptr, length uintptr, prot int, flags int, fd int, offset int64) (xaddr uintptr, err error)
 
-func sysconf(name int) (n int64, err syscall.Errno)
-
-// pageSize caches the value of Getpagesize, since it can't change
-// once the system is booted.
-var pageSize int64 // accessed atomically
-
-func Getpagesize() int {
-	n := atomic.LoadInt64(&pageSize)
-	if n == 0 {
-		n, _ = sysconf(_SC_PAGESIZE)
-		atomic.StoreInt64(&pageSize, n)
-	}
-	return int(n)
-}
-
 func Ioperm(from int, num int, on int) (err error) {
 	return ENOSYS
 }
@@ -102,19 +82,12 @@
 
 //sys	Utime(path string, buf *Utimbuf) (err error)
 
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = nsec % 1e9
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Sec = nsec / 1e9
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: int32(usec)}
 }
 
 func (r *PtraceRegs) PC() uint64 { return r.Tpc }
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index e129668..1caa5b3 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -422,7 +422,6 @@
 // ntp_adjtime
 // pmc_control
 // pmc_get_info
-// poll
 // pollts
 // preadv
 // profil
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
index afaca09..24f74e5 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
@@ -6,21 +6,12 @@
 
 package unix
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = int64(nsec / 1e9)
-	ts.Nsec = int32(nsec % 1e9)
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: int32(nsec)}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	tv.Sec = int64(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: int32(usec)}
 }
 
 func SetKevent(k *Kevent_t, fd, mode, flags int) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
index a6ff04c..6878bf7 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
@@ -6,21 +6,12 @@
 
 package unix
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = int64(nsec / 1e9)
-	ts.Nsec = int64(nsec % 1e9)
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	tv.Sec = int64(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: int32(usec)}
 }
 
 func SetKevent(k *Kevent_t, fd, mode, flags int) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
index 68a6969..dbbfcf7 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
@@ -6,21 +6,12 @@
 
 package unix
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = int64(nsec / 1e9)
-	ts.Nsec = int32(nsec % 1e9)
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: int32(nsec)}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	tv.Sec = int64(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: int32(usec)}
 }
 
 func SetKevent(k *Kevent_t, fd, mode, flags int) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index 408e630..03a0fac 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -243,7 +243,6 @@
 // nfssvc
 // nnpfspioctl
 // openat
-// poll
 // preadv
 // profil
 // pwritev
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
index a66ddc5..994964a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
@@ -6,21 +6,12 @@
 
 package unix
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = int64(nsec / 1e9)
-	ts.Nsec = int32(nsec % 1e9)
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: int32(nsec)}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	tv.Sec = int64(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: int32(usec)}
 }
 
 func SetKevent(k *Kevent_t, fd, mode, flags int) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
index 0776c1f..649e67f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
@@ -6,21 +6,12 @@
 
 package unix
 
-func Getpagesize() int { return 4096 }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = nsec % 1e9
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = nsec % 1e9 / 1e3
-	tv.Sec = nsec / 1e9
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
 }
 
 func SetKevent(k *Kevent_t, fd, mode, flags int) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
index 14ddaf3..59844f5 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
@@ -6,23 +6,12 @@
 
 package unix
 
-import "syscall"
-
-func Getpagesize() int { return syscall.Getpagesize() }
-
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = int64(nsec / 1e9)
-	ts.Nsec = int32(nsec % 1e9)
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: int32(nsec)}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = int32(nsec % 1e9 / 1e3)
-	tv.Sec = int64(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: int32(usec)}
 }
 
 func SetKevent(k *Kevent_t, fd, mode, flags int) {
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 0d4e5c4..3ab9e07 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -13,7 +13,6 @@
 package unix
 
 import (
-	"sync/atomic"
 	"syscall"
 	"unsafe"
 )
@@ -167,7 +166,7 @@
 
 func Getgroups() (gids []int, err error) {
 	n, err := getgroups(0, nil)
-	// Check for error and sanity check group count.  Newer versions of
+	// Check for error and sanity check group count. Newer versions of
 	// Solaris allow up to 1024 (NGROUPS_MAX).
 	if n < 0 || n > 1024 {
 		if err != nil {
@@ -351,7 +350,7 @@
 }
 
 // Solaris doesn't have an futimes function because it allows NULL to be
-// specified as the path for futimesat.  However, Go doesn't like
+// specified as the path for futimesat. However, Go doesn't like
 // NULL-style string interfaces, so this simple wrapper is provided.
 func Futimes(fd int, tv []Timeval) error {
 	if tv == nil {
@@ -515,6 +514,24 @@
 	return acct(pathp)
 }
 
+//sys	__makedev(version int, major uint, minor uint) (val uint64)
+
+func Mkdev(major, minor uint32) uint64 {
+	return __makedev(NEWDEV, uint(major), uint(minor))
+}
+
+//sys	__major(version int, dev uint64) (val uint)
+
+func Major(dev uint64) uint32 {
+	return uint32(__major(NEWDEV, dev))
+}
+
+//sys	__minor(version int, dev uint64) (val uint)
+
+func Minor(dev uint64) uint32 {
+	return uint32(__minor(NEWDEV, dev))
+}
+
 /*
  * Expose the ioctl function
  */
@@ -561,6 +578,15 @@
 	return &value, err
 }
 
+//sys   poll(fds *PollFd, nfds int, timeout int) (n int, err error)
+
+func Poll(fds []PollFd, timeout int) (n int, err error) {
+	if len(fds) == 0 {
+		return poll(nil, 0, timeout)
+	}
+	return poll(&fds[0], len(fds), timeout)
+}
+
 /*
  * Exposed directly
  */
@@ -613,6 +639,7 @@
 //sys	Mlock(b []byte) (err error)
 //sys	Mlockall(flags int) (err error)
 //sys	Mprotect(b []byte, prot int) (err error)
+//sys	Msync(b []byte, flags int) (err error)
 //sys	Munlock(b []byte) (err error)
 //sys	Munlockall() (err error)
 //sys	Nanosleep(time *Timespec, leftover *Timespec) (err error)
@@ -699,18 +726,3 @@
 func Munmap(b []byte) (err error) {
 	return mapper.Munmap(b)
 }
-
-//sys	sysconf(name int) (n int64, err error)
-
-// pageSize caches the value of Getpagesize, since it can't change
-// once the system is booted.
-var pageSize int64 // accessed atomically
-
-func Getpagesize() int {
-	n := atomic.LoadInt64(&pageSize)
-	if n == 0 {
-		n, _ = sysconf(_SC_PAGESIZE)
-		atomic.StoreInt64(&pageSize, n)
-	}
-	return int(n)
-}
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
index 5aff62c..9d4e7a6 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
@@ -6,19 +6,12 @@
 
 package unix
 
-func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
-
-func NsecToTimespec(nsec int64) (ts Timespec) {
-	ts.Sec = nsec / 1e9
-	ts.Nsec = nsec % 1e9
-	return
+func setTimespec(sec, nsec int64) Timespec {
+	return Timespec{Sec: sec, Nsec: nsec}
 }
 
-func NsecToTimeval(nsec int64) (tv Timeval) {
-	nsec += 999 // round up to microsecond
-	tv.Usec = nsec % 1e9 / 1e3
-	tv.Sec = int64(nsec / 1e9)
-	return
+func setTimeval(sec, usec int64) Timeval {
+	return Timeval{Sec: sec, Usec: usec}
 }
 
 func (iov *Iovec) SetLen(length int) {
diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go
new file mode 100644
index 0000000..139fbbe
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/timestruct.go
@@ -0,0 +1,62 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package unix
+
+// TimespecToNsec converts a Timespec value into a number of
+// nanoseconds since the Unix epoch.
+func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) }
+
+// NsecToTimespec takes a number of nanoseconds since the Unix epoch
+// and returns the corresponding Timespec value.
+func NsecToTimespec(nsec int64) Timespec {
+	sec := nsec / 1e9
+	nsec = nsec % 1e9
+	if nsec < 0 {
+		nsec += 1e9
+		sec--
+	}
+	return setTimespec(sec, nsec)
+}
+
+// TimevalToNsec converts a Timeval value into a number of nanoseconds
+// since the Unix epoch.
+func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 }
+
+// NsecToTimeval takes a number of nanoseconds since the Unix epoch
+// and returns the corresponding Timeval value.
+func NsecToTimeval(nsec int64) Timeval {
+	nsec += 999 // round up to microsecond
+	usec := nsec % 1e9 / 1e3
+	sec := nsec / 1e9
+	if usec < 0 {
+		usec += 1e6
+		sec--
+	}
+	return setTimeval(sec, usec)
+}
+
+// Unix returns ts as the number of seconds and nanoseconds elapsed since the
+// Unix epoch.
+func (ts *Timespec) Unix() (sec int64, nsec int64) {
+	return int64(ts.Sec), int64(ts.Nsec)
+}
+
+// Unix returns tv as the number of seconds and nanoseconds elapsed since the
+// Unix epoch.
+func (tv *Timeval) Unix() (sec int64, nsec int64) {
+	return int64(tv.Sec), int64(tv.Usec) * 1000
+}
+
+// Nano returns ts as the number of nanoseconds elapsed since the Unix epoch.
+func (ts *Timespec) Nano() int64 {
+	return int64(ts.Sec)*1e9 + int64(ts.Nsec)
+}
+
+// Nano returns tv as the number of nanoseconds elapsed since the Unix epoch.
+func (tv *Timeval) Nano() int64 {
+	return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000
+}
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
index 1d3eec4..adf5eef 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go
@@ -981,6 +981,49 @@
 	MAP_STACK                      = 0x400
 	MCL_CURRENT                    = 0x1
 	MCL_FUTURE                     = 0x2
+	MNT_ACLS                       = 0x8000000
+	MNT_ASYNC                      = 0x40
+	MNT_AUTOMOUNTED                = 0x200000000
+	MNT_BYFSID                     = 0x8000000
+	MNT_CMDFLAGS                   = 0xd0f0000
+	MNT_DEFEXPORTED                = 0x200
+	MNT_DELEXPORT                  = 0x20000
+	MNT_EXKERB                     = 0x800
+	MNT_EXPORTANON                 = 0x400
+	MNT_EXPORTED                   = 0x100
+	MNT_EXPUBLIC                   = 0x20000000
+	MNT_EXRDONLY                   = 0x80
+	MNT_FORCE                      = 0x80000
+	MNT_GJOURNAL                   = 0x2000000
+	MNT_IGNORE                     = 0x800000
+	MNT_LAZY                       = 0x3
+	MNT_LOCAL                      = 0x1000
+	MNT_MULTILABEL                 = 0x4000000
+	MNT_NFS4ACLS                   = 0x10
+	MNT_NOATIME                    = 0x10000000
+	MNT_NOCLUSTERR                 = 0x40000000
+	MNT_NOCLUSTERW                 = 0x80000000
+	MNT_NOEXEC                     = 0x4
+	MNT_NONBUSY                    = 0x4000000
+	MNT_NOSUID                     = 0x8
+	MNT_NOSYMFOLLOW                = 0x400000
+	MNT_NOWAIT                     = 0x2
+	MNT_QUOTA                      = 0x2000
+	MNT_RDONLY                     = 0x1
+	MNT_RELOAD                     = 0x40000
+	MNT_ROOTFS                     = 0x4000
+	MNT_SNAPSHOT                   = 0x1000000
+	MNT_SOFTDEP                    = 0x200000
+	MNT_SUIDDIR                    = 0x100000
+	MNT_SUJ                        = 0x100000000
+	MNT_SUSPEND                    = 0x4
+	MNT_SYNCHRONOUS                = 0x2
+	MNT_UNION                      = 0x20
+	MNT_UPDATE                     = 0x10000
+	MNT_UPDATEMASK                 = 0x2d8d0807e
+	MNT_USER                       = 0x8000
+	MNT_VISFLAGMASK                = 0x3fef0ffff
+	MNT_WAIT                       = 0x1
 	MSG_CMSG_CLOEXEC               = 0x40000
 	MSG_COMPAT                     = 0x8000
 	MSG_CTRUNC                     = 0x20
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
index ac094f9..360caff 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go
@@ -982,6 +982,49 @@
 	MAP_STACK                      = 0x400
 	MCL_CURRENT                    = 0x1
 	MCL_FUTURE                     = 0x2
+	MNT_ACLS                       = 0x8000000
+	MNT_ASYNC                      = 0x40
+	MNT_AUTOMOUNTED                = 0x200000000
+	MNT_BYFSID                     = 0x8000000
+	MNT_CMDFLAGS                   = 0xd0f0000
+	MNT_DEFEXPORTED                = 0x200
+	MNT_DELEXPORT                  = 0x20000
+	MNT_EXKERB                     = 0x800
+	MNT_EXPORTANON                 = 0x400
+	MNT_EXPORTED                   = 0x100
+	MNT_EXPUBLIC                   = 0x20000000
+	MNT_EXRDONLY                   = 0x80
+	MNT_FORCE                      = 0x80000
+	MNT_GJOURNAL                   = 0x2000000
+	MNT_IGNORE                     = 0x800000
+	MNT_LAZY                       = 0x3
+	MNT_LOCAL                      = 0x1000
+	MNT_MULTILABEL                 = 0x4000000
+	MNT_NFS4ACLS                   = 0x10
+	MNT_NOATIME                    = 0x10000000
+	MNT_NOCLUSTERR                 = 0x40000000
+	MNT_NOCLUSTERW                 = 0x80000000
+	MNT_NOEXEC                     = 0x4
+	MNT_NONBUSY                    = 0x4000000
+	MNT_NOSUID                     = 0x8
+	MNT_NOSYMFOLLOW                = 0x400000
+	MNT_NOWAIT                     = 0x2
+	MNT_QUOTA                      = 0x2000
+	MNT_RDONLY                     = 0x1
+	MNT_RELOAD                     = 0x40000
+	MNT_ROOTFS                     = 0x4000
+	MNT_SNAPSHOT                   = 0x1000000
+	MNT_SOFTDEP                    = 0x200000
+	MNT_SUIDDIR                    = 0x100000
+	MNT_SUJ                        = 0x100000000
+	MNT_SUSPEND                    = 0x4
+	MNT_SYNCHRONOUS                = 0x2
+	MNT_UNION                      = 0x20
+	MNT_UPDATE                     = 0x10000
+	MNT_UPDATEMASK                 = 0x2d8d0807e
+	MNT_USER                       = 0x8000
+	MNT_VISFLAGMASK                = 0x3fef0ffff
+	MNT_WAIT                       = 0x1
 	MSG_CMSG_CLOEXEC               = 0x40000
 	MSG_COMPAT                     = 0x8000
 	MSG_CTRUNC                     = 0x20
diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
index c5c6f13..87deda9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go
@@ -989,6 +989,49 @@
 	MAP_STACK                      = 0x400
 	MCL_CURRENT                    = 0x1
 	MCL_FUTURE                     = 0x2
+	MNT_ACLS                       = 0x8000000
+	MNT_ASYNC                      = 0x40
+	MNT_AUTOMOUNTED                = 0x200000000
+	MNT_BYFSID                     = 0x8000000
+	MNT_CMDFLAGS                   = 0xd0f0000
+	MNT_DEFEXPORTED                = 0x200
+	MNT_DELEXPORT                  = 0x20000
+	MNT_EXKERB                     = 0x800
+	MNT_EXPORTANON                 = 0x400
+	MNT_EXPORTED                   = 0x100
+	MNT_EXPUBLIC                   = 0x20000000
+	MNT_EXRDONLY                   = 0x80
+	MNT_FORCE                      = 0x80000
+	MNT_GJOURNAL                   = 0x2000000
+	MNT_IGNORE                     = 0x800000
+	MNT_LAZY                       = 0x3
+	MNT_LOCAL                      = 0x1000
+	MNT_MULTILABEL                 = 0x4000000
+	MNT_NFS4ACLS                   = 0x10
+	MNT_NOATIME                    = 0x10000000
+	MNT_NOCLUSTERR                 = 0x40000000
+	MNT_NOCLUSTERW                 = 0x80000000
+	MNT_NOEXEC                     = 0x4
+	MNT_NONBUSY                    = 0x4000000
+	MNT_NOSUID                     = 0x8
+	MNT_NOSYMFOLLOW                = 0x400000
+	MNT_NOWAIT                     = 0x2
+	MNT_QUOTA                      = 0x2000
+	MNT_RDONLY                     = 0x1
+	MNT_RELOAD                     = 0x40000
+	MNT_ROOTFS                     = 0x4000
+	MNT_SNAPSHOT                   = 0x1000000
+	MNT_SOFTDEP                    = 0x200000
+	MNT_SUIDDIR                    = 0x100000
+	MNT_SUJ                        = 0x100000000
+	MNT_SUSPEND                    = 0x4
+	MNT_SYNCHRONOUS                = 0x2
+	MNT_UNION                      = 0x20
+	MNT_UPDATE                     = 0x10000
+	MNT_UPDATEMASK                 = 0x2d8d0807e
+	MNT_USER                       = 0x8000
+	MNT_VISFLAGMASK                = 0x3fef0ffff
+	MNT_WAIT                       = 0x1
 	MSG_CMSG_CLOEXEC               = 0x40000
 	MSG_COMPAT                     = 0x8000
 	MSG_CTRUNC                     = 0x20
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index a6b3b5f..bb8a772 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -36,7 +36,7 @@
 	AF_KEY                               = 0xf
 	AF_LLC                               = 0x1a
 	AF_LOCAL                             = 0x1
-	AF_MAX                               = 0x2b
+	AF_MAX                               = 0x2c
 	AF_MPLS                              = 0x1c
 	AF_NETBEUI                           = 0xd
 	AF_NETLINK                           = 0x10
@@ -51,6 +51,7 @@
 	AF_ROUTE                             = 0x10
 	AF_RXRPC                             = 0x21
 	AF_SECURITY                          = 0xe
+	AF_SMC                               = 0x2b
 	AF_SNA                               = 0x16
 	AF_TIPC                              = 0x1e
 	AF_UNIX                              = 0x1
@@ -129,6 +130,7 @@
 	ARPHRD_TUNNEL                        = 0x300
 	ARPHRD_TUNNEL6                       = 0x301
 	ARPHRD_VOID                          = 0xffff
+	ARPHRD_VSOCKMON                      = 0x33a
 	ARPHRD_X25                           = 0x10f
 	B0                                   = 0x0
 	B1000000                             = 0x1008
@@ -392,6 +394,7 @@
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
 	ETH_P_HSR                            = 0x892f
+	ETH_P_IBOE                           = 0x8915
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
@@ -453,6 +456,8 @@
 	FF1                                  = 0x8000
 	FFDLY                                = 0x8000
 	FLUSHO                               = 0x1000
+	FS_ENCRYPTION_MODE_AES_128_CBC       = 0x5
+	FS_ENCRYPTION_MODE_AES_128_CTS       = 0x6
 	FS_ENCRYPTION_MODE_AES_256_CBC       = 0x3
 	FS_ENCRYPTION_MODE_AES_256_CTS       = 0x4
 	FS_ENCRYPTION_MODE_AES_256_GCM       = 0x2
@@ -507,6 +512,19 @@
 	F_ULOCK                              = 0x0
 	F_UNLCK                              = 0x2
 	F_WRLCK                              = 0x1
+	GENL_ADMIN_PERM                      = 0x1
+	GENL_CMD_CAP_DO                      = 0x2
+	GENL_CMD_CAP_DUMP                    = 0x4
+	GENL_CMD_CAP_HASPOL                  = 0x8
+	GENL_HDRLEN                          = 0x4
+	GENL_ID_CTRL                         = 0x10
+	GENL_ID_PMCRAID                      = 0x12
+	GENL_ID_VFS_DQUOT                    = 0x11
+	GENL_MAX_ID                          = 0x3ff
+	GENL_MIN_ID                          = 0x10
+	GENL_NAMSIZ                          = 0x10
+	GENL_START_ALLOC                     = 0x13
+	GENL_UNS_ADMIN_PERM                  = 0x10
 	GRND_NONBLOCK                        = 0x1
 	GRND_RANDOM                          = 0x2
 	HUPCL                                = 0x400
@@ -644,8 +662,10 @@
 	IPV6_2292PKTOPTIONS                  = 0x6
 	IPV6_2292RTHDR                       = 0x5
 	IPV6_ADDRFORM                        = 0x1
+	IPV6_ADDR_PREFERENCES                = 0x48
 	IPV6_ADD_MEMBERSHIP                  = 0x14
 	IPV6_AUTHHDR                         = 0xa
+	IPV6_AUTOFLOWLABEL                   = 0x46
 	IPV6_CHECKSUM                        = 0x7
 	IPV6_DONTFRAG                        = 0x3e
 	IPV6_DROP_MEMBERSHIP                 = 0x15
@@ -658,12 +678,14 @@
 	IPV6_JOIN_GROUP                      = 0x14
 	IPV6_LEAVE_ANYCAST                   = 0x1c
 	IPV6_LEAVE_GROUP                     = 0x15
+	IPV6_MINHOPCOUNT                     = 0x49
 	IPV6_MTU                             = 0x18
 	IPV6_MTU_DISCOVER                    = 0x17
 	IPV6_MULTICAST_HOPS                  = 0x12
 	IPV6_MULTICAST_IF                    = 0x11
 	IPV6_MULTICAST_LOOP                  = 0x13
 	IPV6_NEXTHOP                         = 0x9
+	IPV6_ORIGDSTADDR                     = 0x4a
 	IPV6_PATHMTU                         = 0x3d
 	IPV6_PKTINFO                         = 0x32
 	IPV6_PMTUDISC_DO                     = 0x2
@@ -674,8 +696,10 @@
 	IPV6_PMTUDISC_WANT                   = 0x1
 	IPV6_RECVDSTOPTS                     = 0x3a
 	IPV6_RECVERR                         = 0x19
+	IPV6_RECVFRAGSIZE                    = 0x4d
 	IPV6_RECVHOPLIMIT                    = 0x33
 	IPV6_RECVHOPOPTS                     = 0x35
+	IPV6_RECVORIGDSTADDR                 = 0x4a
 	IPV6_RECVPATHMTU                     = 0x3c
 	IPV6_RECVPKTINFO                     = 0x31
 	IPV6_RECVRTHDR                       = 0x38
@@ -689,7 +713,9 @@
 	IPV6_RXDSTOPTS                       = 0x3b
 	IPV6_RXHOPOPTS                       = 0x36
 	IPV6_TCLASS                          = 0x43
+	IPV6_TRANSPARENT                     = 0x4b
 	IPV6_UNICAST_HOPS                    = 0x10
+	IPV6_UNICAST_IF                      = 0x4c
 	IPV6_V6ONLY                          = 0x1a
 	IPV6_XFRM_POLICY                     = 0x23
 	IP_ADD_MEMBERSHIP                    = 0x23
@@ -732,6 +758,7 @@
 	IP_PMTUDISC_PROBE                    = 0x3
 	IP_PMTUDISC_WANT                     = 0x1
 	IP_RECVERR                           = 0xb
+	IP_RECVFRAGSIZE                      = 0x19
 	IP_RECVOPTS                          = 0x6
 	IP_RECVORIGDSTADDR                   = 0x14
 	IP_RECVRETOPTS                       = 0x7
@@ -769,6 +796,7 @@
 	KEYCTL_NEGATE                        = 0xd
 	KEYCTL_READ                          = 0xb
 	KEYCTL_REJECT                        = 0x13
+	KEYCTL_RESTRICT_KEYRING              = 0x1d
 	KEYCTL_REVOKE                        = 0x3
 	KEYCTL_SEARCH                        = 0xa
 	KEYCTL_SESSION_TO_PARENT             = 0x12
@@ -902,6 +930,7 @@
 	MS_SILENT                            = 0x8000
 	MS_SLAVE                             = 0x80000
 	MS_STRICTATIME                       = 0x1000000
+	MS_SUBMOUNT                          = 0x4000000
 	MS_SYNC                              = 0x4
 	MS_SYNCHRONOUS                       = 0x10
 	MS_UNBINDABLE                        = 0x20000
@@ -916,6 +945,7 @@
 	NETLINK_DNRTMSG                      = 0xe
 	NETLINK_DROP_MEMBERSHIP              = 0x2
 	NETLINK_ECRYPTFS                     = 0x13
+	NETLINK_EXT_ACK                      = 0xb
 	NETLINK_FIB_LOOKUP                   = 0xa
 	NETLINK_FIREWALL                     = 0x3
 	NETLINK_GENERIC                      = 0x10
@@ -934,6 +964,7 @@
 	NETLINK_RX_RING                      = 0x6
 	NETLINK_SCSITRANSPORT                = 0x12
 	NETLINK_SELINUX                      = 0x7
+	NETLINK_SMC                          = 0x16
 	NETLINK_SOCK_DIAG                    = 0x4
 	NETLINK_TX_RING                      = 0x7
 	NETLINK_UNUSED                       = 0x1
@@ -954,8 +985,10 @@
 	NLMSG_NOOP                           = 0x1
 	NLMSG_OVERRUN                        = 0x4
 	NLM_F_ACK                            = 0x4
+	NLM_F_ACK_TLVS                       = 0x200
 	NLM_F_APPEND                         = 0x800
 	NLM_F_ATOMIC                         = 0x400
+	NLM_F_CAPPED                         = 0x100
 	NLM_F_CREATE                         = 0x400
 	NLM_F_DUMP                           = 0x300
 	NLM_F_DUMP_FILTERED                  = 0x20
@@ -1012,6 +1045,7 @@
 	PACKET_FANOUT_EBPF                   = 0x7
 	PACKET_FANOUT_FLAG_DEFRAG            = 0x8000
 	PACKET_FANOUT_FLAG_ROLLOVER          = 0x1000
+	PACKET_FANOUT_FLAG_UNIQUEID          = 0x2000
 	PACKET_FANOUT_HASH                   = 0x0
 	PACKET_FANOUT_LB                     = 0x1
 	PACKET_FANOUT_QM                     = 0x5
@@ -1243,7 +1277,7 @@
 	RLIMIT_RTTIME                        = 0xf
 	RLIMIT_SIGPENDING                    = 0xb
 	RLIMIT_STACK                         = 0x3
-	RLIM_INFINITY                        = -0x1
+	RLIM_INFINITY                        = 0xffffffffffffffff
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
@@ -1268,7 +1302,7 @@
 	RTAX_UNSPEC                          = 0x0
 	RTAX_WINDOW                          = 0x3
 	RTA_ALIGNTO                          = 0x4
-	RTA_MAX                              = 0x19
+	RTA_MAX                              = 0x1a
 	RTCF_DIRECTSRC                       = 0x4000000
 	RTCF_DOREDIRECT                      = 0x1000000
 	RTCF_LOG                             = 0x2000000
@@ -1312,6 +1346,7 @@
 	RTM_DELLINK                          = 0x11
 	RTM_DELMDB                           = 0x55
 	RTM_DELNEIGH                         = 0x1d
+	RTM_DELNETCONF                       = 0x51
 	RTM_DELNSID                          = 0x59
 	RTM_DELQDISC                         = 0x25
 	RTM_DELROUTE                         = 0x19
@@ -1320,6 +1355,7 @@
 	RTM_DELTFILTER                       = 0x2d
 	RTM_F_CLONED                         = 0x200
 	RTM_F_EQUALIZE                       = 0x400
+	RTM_F_FIB_MATCH                      = 0x2000
 	RTM_F_LOOKUP_TABLE                   = 0x1000
 	RTM_F_NOTIFY                         = 0x100
 	RTM_F_PREFIX                         = 0x800
@@ -1341,10 +1377,11 @@
 	RTM_GETSTATS                         = 0x5e
 	RTM_GETTCLASS                        = 0x2a
 	RTM_GETTFILTER                       = 0x2e
-	RTM_MAX                              = 0x5f
+	RTM_MAX                              = 0x63
 	RTM_NEWACTION                        = 0x30
 	RTM_NEWADDR                          = 0x14
 	RTM_NEWADDRLABEL                     = 0x48
+	RTM_NEWCACHEREPORT                   = 0x60
 	RTM_NEWLINK                          = 0x10
 	RTM_NEWMDB                           = 0x54
 	RTM_NEWNDUSEROPT                     = 0x44
@@ -1359,8 +1396,8 @@
 	RTM_NEWSTATS                         = 0x5c
 	RTM_NEWTCLASS                        = 0x28
 	RTM_NEWTFILTER                       = 0x2c
-	RTM_NR_FAMILIES                      = 0x14
-	RTM_NR_MSGTYPES                      = 0x50
+	RTM_NR_FAMILIES                      = 0x15
+	RTM_NR_MSGTYPES                      = 0x54
 	RTM_SETDCB                           = 0x4f
 	RTM_SETLINK                          = 0x13
 	RTM_SETNEIGHTBL                      = 0x43
@@ -1371,6 +1408,7 @@
 	RTNH_F_OFFLOAD                       = 0x8
 	RTNH_F_ONLINK                        = 0x4
 	RTNH_F_PERVASIVE                     = 0x2
+	RTNH_F_UNRESOLVED                    = 0x20
 	RTN_MAX                              = 0xb
 	RTPROT_BABEL                         = 0x2a
 	RTPROT_BIRD                          = 0xc
@@ -1401,6 +1439,7 @@
 	SCM_TIMESTAMP                        = 0x1d
 	SCM_TIMESTAMPING                     = 0x25
 	SCM_TIMESTAMPING_OPT_STATS           = 0x36
+	SCM_TIMESTAMPING_PKTINFO             = 0x3a
 	SCM_TIMESTAMPNS                      = 0x23
 	SCM_WIFI_STATUS                      = 0x29
 	SECCOMP_MODE_DISABLED                = 0x0
@@ -1539,6 +1578,7 @@
 	SO_BSDCOMPAT                         = 0xe
 	SO_BUSY_POLL                         = 0x2e
 	SO_CNX_ADVICE                        = 0x35
+	SO_COOKIE                            = 0x39
 	SO_DEBUG                             = 0x1
 	SO_DETACH_BPF                        = 0x1b
 	SO_DETACH_FILTER                     = 0x1b
@@ -1547,11 +1587,13 @@
 	SO_ERROR                             = 0x4
 	SO_GET_FILTER                        = 0x1a
 	SO_INCOMING_CPU                      = 0x31
+	SO_INCOMING_NAPI_ID                  = 0x38
 	SO_KEEPALIVE                         = 0x9
 	SO_LINGER                            = 0xd
 	SO_LOCK_FILTER                       = 0x2c
 	SO_MARK                              = 0x24
 	SO_MAX_PACING_RATE                   = 0x2f
+	SO_MEMINFO                           = 0x37
 	SO_NOFCS                             = 0x2b
 	SO_NO_CHECK                          = 0xb
 	SO_OOBINLINE                         = 0xa
@@ -1559,6 +1601,7 @@
 	SO_PASSSEC                           = 0x22
 	SO_PEEK_OFF                          = 0x2a
 	SO_PEERCRED                          = 0x11
+	SO_PEERGROUPS                        = 0x3b
 	SO_PEERNAME                          = 0x1c
 	SO_PEERSEC                           = 0x1f
 	SO_PRIORITY                          = 0xc
@@ -1626,6 +1669,12 @@
 	TAB2                                 = 0x1000
 	TAB3                                 = 0x1800
 	TABDLY                               = 0x1800
+	TASKSTATS_CMD_ATTR_MAX               = 0x4
+	TASKSTATS_CMD_MAX                    = 0x2
+	TASKSTATS_GENL_NAME                  = "TASKSTATS"
+	TASKSTATS_GENL_VERSION               = 0x1
+	TASKSTATS_TYPE_MAX                   = 0x6
+	TASKSTATS_VERSION                    = 0x8
 	TCFLSH                               = 0x540b
 	TCGETA                               = 0x5405
 	TCGETS                               = 0x5401
@@ -1649,6 +1698,7 @@
 	TCP_CORK                             = 0x3
 	TCP_DEFER_ACCEPT                     = 0x9
 	TCP_FASTOPEN                         = 0x17
+	TCP_FASTOPEN_CONNECT                 = 0x1e
 	TCP_INFO                             = 0xb
 	TCP_KEEPCNT                          = 0x6
 	TCP_KEEPIDLE                         = 0x4
@@ -1708,6 +1758,7 @@
 	TIOCGPKT                             = 0x80045438
 	TIOCGPTLCK                           = 0x80045439
 	TIOCGPTN                             = 0x80045430
+	TIOCGPTPEER                          = 0x5441
 	TIOCGRS485                           = 0x542e
 	TIOCGSERIAL                          = 0x541e
 	TIOCGSID                             = 0x5429
@@ -1765,6 +1816,7 @@
 	TIOCSWINSZ                           = 0x5414
 	TIOCVHANGUP                          = 0x5437
 	TOSTOP                               = 0x100
+	TS_COMM_LEN                          = 0x20
 	TUNATTACHFILTER                      = 0x400854d5
 	TUNDETACHFILTER                      = 0x400854d6
 	TUNGETFEATURES                       = 0x800454cf
@@ -1790,6 +1842,8 @@
 	TUNSETVNETHDRSZ                      = 0x400454d8
 	TUNSETVNETLE                         = 0x400454dc
 	UMOUNT_NOFOLLOW                      = 0x8
+	UTIME_NOW                            = 0x3fffffff
+	UTIME_OMIT                           = 0x3ffffffe
 	VDISCARD                             = 0xd
 	VEOF                                 = 0x4
 	VEOL                                 = 0xb
@@ -1819,6 +1873,17 @@
 	WALL                                 = 0x40000000
 	WCLONE                               = 0x80000000
 	WCONTINUED                           = 0x8
+	WDIOC_GETBOOTSTATUS                  = 0x80045702
+	WDIOC_GETPRETIMEOUT                  = 0x80045709
+	WDIOC_GETSTATUS                      = 0x80045701
+	WDIOC_GETSUPPORT                     = 0x80285700
+	WDIOC_GETTEMP                        = 0x80045703
+	WDIOC_GETTIMELEFT                    = 0x8004570a
+	WDIOC_GETTIMEOUT                     = 0x80045707
+	WDIOC_KEEPALIVE                      = 0x80045705
+	WDIOC_SETOPTIONS                     = 0x80045704
+	WDIOC_SETPRETIMEOUT                  = 0xc0045708
+	WDIOC_SETTIMEOUT                     = 0xc0045706
 	WEXITED                              = 0x4
 	WNOHANG                              = 0x1
 	WNOTHREAD                            = 0x20000000
@@ -1999,7 +2064,6 @@
 	SIGTSTP   = syscall.Signal(0x14)
 	SIGTTIN   = syscall.Signal(0x15)
 	SIGTTOU   = syscall.Signal(0x16)
-	SIGUNUSED = syscall.Signal(0x1f)
 	SIGURG    = syscall.Signal(0x17)
 	SIGUSR1   = syscall.Signal(0xa)
 	SIGUSR2   = syscall.Signal(0xc)
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index 4ffc8d2..cf0b224 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -36,7 +36,7 @@
 	AF_KEY                               = 0xf
 	AF_LLC                               = 0x1a
 	AF_LOCAL                             = 0x1
-	AF_MAX                               = 0x2b
+	AF_MAX                               = 0x2c
 	AF_MPLS                              = 0x1c
 	AF_NETBEUI                           = 0xd
 	AF_NETLINK                           = 0x10
@@ -51,6 +51,7 @@
 	AF_ROUTE                             = 0x10
 	AF_RXRPC                             = 0x21
 	AF_SECURITY                          = 0xe
+	AF_SMC                               = 0x2b
 	AF_SNA                               = 0x16
 	AF_TIPC                              = 0x1e
 	AF_UNIX                              = 0x1
@@ -129,6 +130,7 @@
 	ARPHRD_TUNNEL                        = 0x300
 	ARPHRD_TUNNEL6                       = 0x301
 	ARPHRD_VOID                          = 0xffff
+	ARPHRD_VSOCKMON                      = 0x33a
 	ARPHRD_X25                           = 0x10f
 	B0                                   = 0x0
 	B1000000                             = 0x1008
@@ -392,6 +394,7 @@
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
 	ETH_P_HSR                            = 0x892f
+	ETH_P_IBOE                           = 0x8915
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
@@ -453,6 +456,8 @@
 	FF1                                  = 0x8000
 	FFDLY                                = 0x8000
 	FLUSHO                               = 0x1000
+	FS_ENCRYPTION_MODE_AES_128_CBC       = 0x5
+	FS_ENCRYPTION_MODE_AES_128_CTS       = 0x6
 	FS_ENCRYPTION_MODE_AES_256_CBC       = 0x3
 	FS_ENCRYPTION_MODE_AES_256_CTS       = 0x4
 	FS_ENCRYPTION_MODE_AES_256_GCM       = 0x2
@@ -507,6 +512,19 @@
 	F_ULOCK                              = 0x0
 	F_UNLCK                              = 0x2
 	F_WRLCK                              = 0x1
+	GENL_ADMIN_PERM                      = 0x1
+	GENL_CMD_CAP_DO                      = 0x2
+	GENL_CMD_CAP_DUMP                    = 0x4
+	GENL_CMD_CAP_HASPOL                  = 0x8
+	GENL_HDRLEN                          = 0x4
+	GENL_ID_CTRL                         = 0x10
+	GENL_ID_PMCRAID                      = 0x12
+	GENL_ID_VFS_DQUOT                    = 0x11
+	GENL_MAX_ID                          = 0x3ff
+	GENL_MIN_ID                          = 0x10
+	GENL_NAMSIZ                          = 0x10
+	GENL_START_ALLOC                     = 0x13
+	GENL_UNS_ADMIN_PERM                  = 0x10
 	GRND_NONBLOCK                        = 0x1
 	GRND_RANDOM                          = 0x2
 	HUPCL                                = 0x400
@@ -644,8 +662,10 @@
 	IPV6_2292PKTOPTIONS                  = 0x6
 	IPV6_2292RTHDR                       = 0x5
 	IPV6_ADDRFORM                        = 0x1
+	IPV6_ADDR_PREFERENCES                = 0x48
 	IPV6_ADD_MEMBERSHIP                  = 0x14
 	IPV6_AUTHHDR                         = 0xa
+	IPV6_AUTOFLOWLABEL                   = 0x46
 	IPV6_CHECKSUM                        = 0x7
 	IPV6_DONTFRAG                        = 0x3e
 	IPV6_DROP_MEMBERSHIP                 = 0x15
@@ -658,12 +678,14 @@
 	IPV6_JOIN_GROUP                      = 0x14
 	IPV6_LEAVE_ANYCAST                   = 0x1c
 	IPV6_LEAVE_GROUP                     = 0x15
+	IPV6_MINHOPCOUNT                     = 0x49
 	IPV6_MTU                             = 0x18
 	IPV6_MTU_DISCOVER                    = 0x17
 	IPV6_MULTICAST_HOPS                  = 0x12
 	IPV6_MULTICAST_IF                    = 0x11
 	IPV6_MULTICAST_LOOP                  = 0x13
 	IPV6_NEXTHOP                         = 0x9
+	IPV6_ORIGDSTADDR                     = 0x4a
 	IPV6_PATHMTU                         = 0x3d
 	IPV6_PKTINFO                         = 0x32
 	IPV6_PMTUDISC_DO                     = 0x2
@@ -674,8 +696,10 @@
 	IPV6_PMTUDISC_WANT                   = 0x1
 	IPV6_RECVDSTOPTS                     = 0x3a
 	IPV6_RECVERR                         = 0x19
+	IPV6_RECVFRAGSIZE                    = 0x4d
 	IPV6_RECVHOPLIMIT                    = 0x33
 	IPV6_RECVHOPOPTS                     = 0x35
+	IPV6_RECVORIGDSTADDR                 = 0x4a
 	IPV6_RECVPATHMTU                     = 0x3c
 	IPV6_RECVPKTINFO                     = 0x31
 	IPV6_RECVRTHDR                       = 0x38
@@ -689,7 +713,9 @@
 	IPV6_RXDSTOPTS                       = 0x3b
 	IPV6_RXHOPOPTS                       = 0x36
 	IPV6_TCLASS                          = 0x43
+	IPV6_TRANSPARENT                     = 0x4b
 	IPV6_UNICAST_HOPS                    = 0x10
+	IPV6_UNICAST_IF                      = 0x4c
 	IPV6_V6ONLY                          = 0x1a
 	IPV6_XFRM_POLICY                     = 0x23
 	IP_ADD_MEMBERSHIP                    = 0x23
@@ -732,6 +758,7 @@
 	IP_PMTUDISC_PROBE                    = 0x3
 	IP_PMTUDISC_WANT                     = 0x1
 	IP_RECVERR                           = 0xb
+	IP_RECVFRAGSIZE                      = 0x19
 	IP_RECVOPTS                          = 0x6
 	IP_RECVORIGDSTADDR                   = 0x14
 	IP_RECVRETOPTS                       = 0x7
@@ -769,6 +796,7 @@
 	KEYCTL_NEGATE                        = 0xd
 	KEYCTL_READ                          = 0xb
 	KEYCTL_REJECT                        = 0x13
+	KEYCTL_RESTRICT_KEYRING              = 0x1d
 	KEYCTL_REVOKE                        = 0x3
 	KEYCTL_SEARCH                        = 0xa
 	KEYCTL_SESSION_TO_PARENT             = 0x12
@@ -902,6 +930,7 @@
 	MS_SILENT                            = 0x8000
 	MS_SLAVE                             = 0x80000
 	MS_STRICTATIME                       = 0x1000000
+	MS_SUBMOUNT                          = 0x4000000
 	MS_SYNC                              = 0x4
 	MS_SYNCHRONOUS                       = 0x10
 	MS_UNBINDABLE                        = 0x20000
@@ -916,6 +945,7 @@
 	NETLINK_DNRTMSG                      = 0xe
 	NETLINK_DROP_MEMBERSHIP              = 0x2
 	NETLINK_ECRYPTFS                     = 0x13
+	NETLINK_EXT_ACK                      = 0xb
 	NETLINK_FIB_LOOKUP                   = 0xa
 	NETLINK_FIREWALL                     = 0x3
 	NETLINK_GENERIC                      = 0x10
@@ -934,6 +964,7 @@
 	NETLINK_RX_RING                      = 0x6
 	NETLINK_SCSITRANSPORT                = 0x12
 	NETLINK_SELINUX                      = 0x7
+	NETLINK_SMC                          = 0x16
 	NETLINK_SOCK_DIAG                    = 0x4
 	NETLINK_TX_RING                      = 0x7
 	NETLINK_UNUSED                       = 0x1
@@ -954,8 +985,10 @@
 	NLMSG_NOOP                           = 0x1
 	NLMSG_OVERRUN                        = 0x4
 	NLM_F_ACK                            = 0x4
+	NLM_F_ACK_TLVS                       = 0x200
 	NLM_F_APPEND                         = 0x800
 	NLM_F_ATOMIC                         = 0x400
+	NLM_F_CAPPED                         = 0x100
 	NLM_F_CREATE                         = 0x400
 	NLM_F_DUMP                           = 0x300
 	NLM_F_DUMP_FILTERED                  = 0x20
@@ -1012,6 +1045,7 @@
 	PACKET_FANOUT_EBPF                   = 0x7
 	PACKET_FANOUT_FLAG_DEFRAG            = 0x8000
 	PACKET_FANOUT_FLAG_ROLLOVER          = 0x1000
+	PACKET_FANOUT_FLAG_UNIQUEID          = 0x2000
 	PACKET_FANOUT_HASH                   = 0x0
 	PACKET_FANOUT_LB                     = 0x1
 	PACKET_FANOUT_QM                     = 0x5
@@ -1153,7 +1187,7 @@
 	PR_SET_NO_NEW_PRIVS                  = 0x26
 	PR_SET_PDEATHSIG                     = 0x1
 	PR_SET_PTRACER                       = 0x59616d61
-	PR_SET_PTRACER_ANY                   = -0x1
+	PR_SET_PTRACER_ANY                   = 0xffffffffffffffff
 	PR_SET_SECCOMP                       = 0x16
 	PR_SET_SECUREBITS                    = 0x1c
 	PR_SET_THP_DISABLE                   = 0x29
@@ -1244,7 +1278,7 @@
 	RLIMIT_RTTIME                        = 0xf
 	RLIMIT_SIGPENDING                    = 0xb
 	RLIMIT_STACK                         = 0x3
-	RLIM_INFINITY                        = -0x1
+	RLIM_INFINITY                        = 0xffffffffffffffff
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
@@ -1269,7 +1303,7 @@
 	RTAX_UNSPEC                          = 0x0
 	RTAX_WINDOW                          = 0x3
 	RTA_ALIGNTO                          = 0x4
-	RTA_MAX                              = 0x19
+	RTA_MAX                              = 0x1a
 	RTCF_DIRECTSRC                       = 0x4000000
 	RTCF_DOREDIRECT                      = 0x1000000
 	RTCF_LOG                             = 0x2000000
@@ -1313,6 +1347,7 @@
 	RTM_DELLINK                          = 0x11
 	RTM_DELMDB                           = 0x55
 	RTM_DELNEIGH                         = 0x1d
+	RTM_DELNETCONF                       = 0x51
 	RTM_DELNSID                          = 0x59
 	RTM_DELQDISC                         = 0x25
 	RTM_DELROUTE                         = 0x19
@@ -1321,6 +1356,7 @@
 	RTM_DELTFILTER                       = 0x2d
 	RTM_F_CLONED                         = 0x200
 	RTM_F_EQUALIZE                       = 0x400
+	RTM_F_FIB_MATCH                      = 0x2000
 	RTM_F_LOOKUP_TABLE                   = 0x1000
 	RTM_F_NOTIFY                         = 0x100
 	RTM_F_PREFIX                         = 0x800
@@ -1342,10 +1378,11 @@
 	RTM_GETSTATS                         = 0x5e
 	RTM_GETTCLASS                        = 0x2a
 	RTM_GETTFILTER                       = 0x2e
-	RTM_MAX                              = 0x5f
+	RTM_MAX                              = 0x63
 	RTM_NEWACTION                        = 0x30
 	RTM_NEWADDR                          = 0x14
 	RTM_NEWADDRLABEL                     = 0x48
+	RTM_NEWCACHEREPORT                   = 0x60
 	RTM_NEWLINK                          = 0x10
 	RTM_NEWMDB                           = 0x54
 	RTM_NEWNDUSEROPT                     = 0x44
@@ -1360,8 +1397,8 @@
 	RTM_NEWSTATS                         = 0x5c
 	RTM_NEWTCLASS                        = 0x28
 	RTM_NEWTFILTER                       = 0x2c
-	RTM_NR_FAMILIES                      = 0x14
-	RTM_NR_MSGTYPES                      = 0x50
+	RTM_NR_FAMILIES                      = 0x15
+	RTM_NR_MSGTYPES                      = 0x54
 	RTM_SETDCB                           = 0x4f
 	RTM_SETLINK                          = 0x13
 	RTM_SETNEIGHTBL                      = 0x43
@@ -1372,6 +1409,7 @@
 	RTNH_F_OFFLOAD                       = 0x8
 	RTNH_F_ONLINK                        = 0x4
 	RTNH_F_PERVASIVE                     = 0x2
+	RTNH_F_UNRESOLVED                    = 0x20
 	RTN_MAX                              = 0xb
 	RTPROT_BABEL                         = 0x2a
 	RTPROT_BIRD                          = 0xc
@@ -1402,6 +1440,7 @@
 	SCM_TIMESTAMP                        = 0x1d
 	SCM_TIMESTAMPING                     = 0x25
 	SCM_TIMESTAMPING_OPT_STATS           = 0x36
+	SCM_TIMESTAMPING_PKTINFO             = 0x3a
 	SCM_TIMESTAMPNS                      = 0x23
 	SCM_WIFI_STATUS                      = 0x29
 	SECCOMP_MODE_DISABLED                = 0x0
@@ -1540,6 +1579,7 @@
 	SO_BSDCOMPAT                         = 0xe
 	SO_BUSY_POLL                         = 0x2e
 	SO_CNX_ADVICE                        = 0x35
+	SO_COOKIE                            = 0x39
 	SO_DEBUG                             = 0x1
 	SO_DETACH_BPF                        = 0x1b
 	SO_DETACH_FILTER                     = 0x1b
@@ -1548,11 +1588,13 @@
 	SO_ERROR                             = 0x4
 	SO_GET_FILTER                        = 0x1a
 	SO_INCOMING_CPU                      = 0x31
+	SO_INCOMING_NAPI_ID                  = 0x38
 	SO_KEEPALIVE                         = 0x9
 	SO_LINGER                            = 0xd
 	SO_LOCK_FILTER                       = 0x2c
 	SO_MARK                              = 0x24
 	SO_MAX_PACING_RATE                   = 0x2f
+	SO_MEMINFO                           = 0x37
 	SO_NOFCS                             = 0x2b
 	SO_NO_CHECK                          = 0xb
 	SO_OOBINLINE                         = 0xa
@@ -1560,6 +1602,7 @@
 	SO_PASSSEC                           = 0x22
 	SO_PEEK_OFF                          = 0x2a
 	SO_PEERCRED                          = 0x11
+	SO_PEERGROUPS                        = 0x3b
 	SO_PEERNAME                          = 0x1c
 	SO_PEERSEC                           = 0x1f
 	SO_PRIORITY                          = 0xc
@@ -1627,6 +1670,12 @@
 	TAB2                                 = 0x1000
 	TAB3                                 = 0x1800
 	TABDLY                               = 0x1800
+	TASKSTATS_CMD_ATTR_MAX               = 0x4
+	TASKSTATS_CMD_MAX                    = 0x2
+	TASKSTATS_GENL_NAME                  = "TASKSTATS"
+	TASKSTATS_GENL_VERSION               = 0x1
+	TASKSTATS_TYPE_MAX                   = 0x6
+	TASKSTATS_VERSION                    = 0x8
 	TCFLSH                               = 0x540b
 	TCGETA                               = 0x5405
 	TCGETS                               = 0x5401
@@ -1650,6 +1699,7 @@
 	TCP_CORK                             = 0x3
 	TCP_DEFER_ACCEPT                     = 0x9
 	TCP_FASTOPEN                         = 0x17
+	TCP_FASTOPEN_CONNECT                 = 0x1e
 	TCP_INFO                             = 0xb
 	TCP_KEEPCNT                          = 0x6
 	TCP_KEEPIDLE                         = 0x4
@@ -1709,6 +1759,7 @@
 	TIOCGPKT                             = 0x80045438
 	TIOCGPTLCK                           = 0x80045439
 	TIOCGPTN                             = 0x80045430
+	TIOCGPTPEER                          = 0x5441
 	TIOCGRS485                           = 0x542e
 	TIOCGSERIAL                          = 0x541e
 	TIOCGSID                             = 0x5429
@@ -1766,6 +1817,7 @@
 	TIOCSWINSZ                           = 0x5414
 	TIOCVHANGUP                          = 0x5437
 	TOSTOP                               = 0x100
+	TS_COMM_LEN                          = 0x20
 	TUNATTACHFILTER                      = 0x401054d5
 	TUNDETACHFILTER                      = 0x401054d6
 	TUNGETFEATURES                       = 0x800454cf
@@ -1791,6 +1843,8 @@
 	TUNSETVNETHDRSZ                      = 0x400454d8
 	TUNSETVNETLE                         = 0x400454dc
 	UMOUNT_NOFOLLOW                      = 0x8
+	UTIME_NOW                            = 0x3fffffff
+	UTIME_OMIT                           = 0x3ffffffe
 	VDISCARD                             = 0xd
 	VEOF                                 = 0x4
 	VEOL                                 = 0xb
@@ -1820,6 +1874,17 @@
 	WALL                                 = 0x40000000
 	WCLONE                               = 0x80000000
 	WCONTINUED                           = 0x8
+	WDIOC_GETBOOTSTATUS                  = 0x80045702
+	WDIOC_GETPRETIMEOUT                  = 0x80045709
+	WDIOC_GETSTATUS                      = 0x80045701
+	WDIOC_GETSUPPORT                     = 0x80285700
+	WDIOC_GETTEMP                        = 0x80045703
+	WDIOC_GETTIMELEFT                    = 0x8004570a
+	WDIOC_GETTIMEOUT                     = 0x80045707
+	WDIOC_KEEPALIVE                      = 0x80045705
+	WDIOC_SETOPTIONS                     = 0x80045704
+	WDIOC_SETPRETIMEOUT                  = 0xc0045708
+	WDIOC_SETTIMEOUT                     = 0xc0045706
 	WEXITED                              = 0x4
 	WNOHANG                              = 0x1
 	WNOTHREAD                            = 0x20000000
@@ -2000,7 +2065,6 @@
 	SIGTSTP   = syscall.Signal(0x14)
 	SIGTTIN   = syscall.Signal(0x15)
 	SIGTTOU   = syscall.Signal(0x16)
-	SIGUNUSED = syscall.Signal(0x1f)
 	SIGURG    = syscall.Signal(0x17)
 	SIGUSR1   = syscall.Signal(0xa)
 	SIGUSR2   = syscall.Signal(0xc)
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index f4b178e..57cfcf3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -36,7 +36,7 @@
 	AF_KEY                               = 0xf
 	AF_LLC                               = 0x1a
 	AF_LOCAL                             = 0x1
-	AF_MAX                               = 0x2b
+	AF_MAX                               = 0x2c
 	AF_MPLS                              = 0x1c
 	AF_NETBEUI                           = 0xd
 	AF_NETLINK                           = 0x10
@@ -51,6 +51,7 @@
 	AF_ROUTE                             = 0x10
 	AF_RXRPC                             = 0x21
 	AF_SECURITY                          = 0xe
+	AF_SMC                               = 0x2b
 	AF_SNA                               = 0x16
 	AF_TIPC                              = 0x1e
 	AF_UNIX                              = 0x1
@@ -129,6 +130,7 @@
 	ARPHRD_TUNNEL                        = 0x300
 	ARPHRD_TUNNEL6                       = 0x301
 	ARPHRD_VOID                          = 0xffff
+	ARPHRD_VSOCKMON                      = 0x33a
 	ARPHRD_X25                           = 0x10f
 	B0                                   = 0x0
 	B1000000                             = 0x1008
@@ -392,6 +394,7 @@
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
 	ETH_P_HSR                            = 0x892f
+	ETH_P_IBOE                           = 0x8915
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
@@ -453,6 +456,8 @@
 	FF1                                  = 0x8000
 	FFDLY                                = 0x8000
 	FLUSHO                               = 0x1000
+	FS_ENCRYPTION_MODE_AES_128_CBC       = 0x5
+	FS_ENCRYPTION_MODE_AES_128_CTS       = 0x6
 	FS_ENCRYPTION_MODE_AES_256_CBC       = 0x3
 	FS_ENCRYPTION_MODE_AES_256_CTS       = 0x4
 	FS_ENCRYPTION_MODE_AES_256_GCM       = 0x2
@@ -507,6 +512,19 @@
 	F_ULOCK                              = 0x0
 	F_UNLCK                              = 0x2
 	F_WRLCK                              = 0x1
+	GENL_ADMIN_PERM                      = 0x1
+	GENL_CMD_CAP_DO                      = 0x2
+	GENL_CMD_CAP_DUMP                    = 0x4
+	GENL_CMD_CAP_HASPOL                  = 0x8
+	GENL_HDRLEN                          = 0x4
+	GENL_ID_CTRL                         = 0x10
+	GENL_ID_PMCRAID                      = 0x12
+	GENL_ID_VFS_DQUOT                    = 0x11
+	GENL_MAX_ID                          = 0x3ff
+	GENL_MIN_ID                          = 0x10
+	GENL_NAMSIZ                          = 0x10
+	GENL_START_ALLOC                     = 0x13
+	GENL_UNS_ADMIN_PERM                  = 0x10
 	GRND_NONBLOCK                        = 0x1
 	GRND_RANDOM                          = 0x2
 	HUPCL                                = 0x400
@@ -644,8 +662,10 @@
 	IPV6_2292PKTOPTIONS                  = 0x6
 	IPV6_2292RTHDR                       = 0x5
 	IPV6_ADDRFORM                        = 0x1
+	IPV6_ADDR_PREFERENCES                = 0x48
 	IPV6_ADD_MEMBERSHIP                  = 0x14
 	IPV6_AUTHHDR                         = 0xa
+	IPV6_AUTOFLOWLABEL                   = 0x46
 	IPV6_CHECKSUM                        = 0x7
 	IPV6_DONTFRAG                        = 0x3e
 	IPV6_DROP_MEMBERSHIP                 = 0x15
@@ -658,12 +678,14 @@
 	IPV6_JOIN_GROUP                      = 0x14
 	IPV6_LEAVE_ANYCAST                   = 0x1c
 	IPV6_LEAVE_GROUP                     = 0x15
+	IPV6_MINHOPCOUNT                     = 0x49
 	IPV6_MTU                             = 0x18
 	IPV6_MTU_DISCOVER                    = 0x17
 	IPV6_MULTICAST_HOPS                  = 0x12
 	IPV6_MULTICAST_IF                    = 0x11
 	IPV6_MULTICAST_LOOP                  = 0x13
 	IPV6_NEXTHOP                         = 0x9
+	IPV6_ORIGDSTADDR                     = 0x4a
 	IPV6_PATHMTU                         = 0x3d
 	IPV6_PKTINFO                         = 0x32
 	IPV6_PMTUDISC_DO                     = 0x2
@@ -674,8 +696,10 @@
 	IPV6_PMTUDISC_WANT                   = 0x1
 	IPV6_RECVDSTOPTS                     = 0x3a
 	IPV6_RECVERR                         = 0x19
+	IPV6_RECVFRAGSIZE                    = 0x4d
 	IPV6_RECVHOPLIMIT                    = 0x33
 	IPV6_RECVHOPOPTS                     = 0x35
+	IPV6_RECVORIGDSTADDR                 = 0x4a
 	IPV6_RECVPATHMTU                     = 0x3c
 	IPV6_RECVPKTINFO                     = 0x31
 	IPV6_RECVRTHDR                       = 0x38
@@ -689,7 +713,9 @@
 	IPV6_RXDSTOPTS                       = 0x3b
 	IPV6_RXHOPOPTS                       = 0x36
 	IPV6_TCLASS                          = 0x43
+	IPV6_TRANSPARENT                     = 0x4b
 	IPV6_UNICAST_HOPS                    = 0x10
+	IPV6_UNICAST_IF                      = 0x4c
 	IPV6_V6ONLY                          = 0x1a
 	IPV6_XFRM_POLICY                     = 0x23
 	IP_ADD_MEMBERSHIP                    = 0x23
@@ -732,6 +758,7 @@
 	IP_PMTUDISC_PROBE                    = 0x3
 	IP_PMTUDISC_WANT                     = 0x1
 	IP_RECVERR                           = 0xb
+	IP_RECVFRAGSIZE                      = 0x19
 	IP_RECVOPTS                          = 0x6
 	IP_RECVORIGDSTADDR                   = 0x14
 	IP_RECVRETOPTS                       = 0x7
@@ -769,6 +796,7 @@
 	KEYCTL_NEGATE                        = 0xd
 	KEYCTL_READ                          = 0xb
 	KEYCTL_REJECT                        = 0x13
+	KEYCTL_RESTRICT_KEYRING              = 0x1d
 	KEYCTL_REVOKE                        = 0x3
 	KEYCTL_SEARCH                        = 0xa
 	KEYCTL_SESSION_TO_PARENT             = 0x12
@@ -901,6 +929,7 @@
 	MS_SILENT                            = 0x8000
 	MS_SLAVE                             = 0x80000
 	MS_STRICTATIME                       = 0x1000000
+	MS_SUBMOUNT                          = 0x4000000
 	MS_SYNC                              = 0x4
 	MS_SYNCHRONOUS                       = 0x10
 	MS_UNBINDABLE                        = 0x20000
@@ -915,6 +944,7 @@
 	NETLINK_DNRTMSG                      = 0xe
 	NETLINK_DROP_MEMBERSHIP              = 0x2
 	NETLINK_ECRYPTFS                     = 0x13
+	NETLINK_EXT_ACK                      = 0xb
 	NETLINK_FIB_LOOKUP                   = 0xa
 	NETLINK_FIREWALL                     = 0x3
 	NETLINK_GENERIC                      = 0x10
@@ -933,6 +963,7 @@
 	NETLINK_RX_RING                      = 0x6
 	NETLINK_SCSITRANSPORT                = 0x12
 	NETLINK_SELINUX                      = 0x7
+	NETLINK_SMC                          = 0x16
 	NETLINK_SOCK_DIAG                    = 0x4
 	NETLINK_TX_RING                      = 0x7
 	NETLINK_UNUSED                       = 0x1
@@ -953,8 +984,10 @@
 	NLMSG_NOOP                           = 0x1
 	NLMSG_OVERRUN                        = 0x4
 	NLM_F_ACK                            = 0x4
+	NLM_F_ACK_TLVS                       = 0x200
 	NLM_F_APPEND                         = 0x800
 	NLM_F_ATOMIC                         = 0x400
+	NLM_F_CAPPED                         = 0x100
 	NLM_F_CREATE                         = 0x400
 	NLM_F_DUMP                           = 0x300
 	NLM_F_DUMP_FILTERED                  = 0x20
@@ -1011,6 +1044,7 @@
 	PACKET_FANOUT_EBPF                   = 0x7
 	PACKET_FANOUT_FLAG_DEFRAG            = 0x8000
 	PACKET_FANOUT_FLAG_ROLLOVER          = 0x1000
+	PACKET_FANOUT_FLAG_UNIQUEID          = 0x2000
 	PACKET_FANOUT_HASH                   = 0x0
 	PACKET_FANOUT_LB                     = 0x1
 	PACKET_FANOUT_QM                     = 0x5
@@ -1248,7 +1282,7 @@
 	RLIMIT_RTTIME                        = 0xf
 	RLIMIT_SIGPENDING                    = 0xb
 	RLIMIT_STACK                         = 0x3
-	RLIM_INFINITY                        = -0x1
+	RLIM_INFINITY                        = 0xffffffffffffffff
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
@@ -1273,7 +1307,7 @@
 	RTAX_UNSPEC                          = 0x0
 	RTAX_WINDOW                          = 0x3
 	RTA_ALIGNTO                          = 0x4
-	RTA_MAX                              = 0x19
+	RTA_MAX                              = 0x1a
 	RTCF_DIRECTSRC                       = 0x4000000
 	RTCF_DOREDIRECT                      = 0x1000000
 	RTCF_LOG                             = 0x2000000
@@ -1317,6 +1351,7 @@
 	RTM_DELLINK                          = 0x11
 	RTM_DELMDB                           = 0x55
 	RTM_DELNEIGH                         = 0x1d
+	RTM_DELNETCONF                       = 0x51
 	RTM_DELNSID                          = 0x59
 	RTM_DELQDISC                         = 0x25
 	RTM_DELROUTE                         = 0x19
@@ -1325,6 +1360,7 @@
 	RTM_DELTFILTER                       = 0x2d
 	RTM_F_CLONED                         = 0x200
 	RTM_F_EQUALIZE                       = 0x400
+	RTM_F_FIB_MATCH                      = 0x2000
 	RTM_F_LOOKUP_TABLE                   = 0x1000
 	RTM_F_NOTIFY                         = 0x100
 	RTM_F_PREFIX                         = 0x800
@@ -1346,10 +1382,11 @@
 	RTM_GETSTATS                         = 0x5e
 	RTM_GETTCLASS                        = 0x2a
 	RTM_GETTFILTER                       = 0x2e
-	RTM_MAX                              = 0x5f
+	RTM_MAX                              = 0x63
 	RTM_NEWACTION                        = 0x30
 	RTM_NEWADDR                          = 0x14
 	RTM_NEWADDRLABEL                     = 0x48
+	RTM_NEWCACHEREPORT                   = 0x60
 	RTM_NEWLINK                          = 0x10
 	RTM_NEWMDB                           = 0x54
 	RTM_NEWNDUSEROPT                     = 0x44
@@ -1364,8 +1401,8 @@
 	RTM_NEWSTATS                         = 0x5c
 	RTM_NEWTCLASS                        = 0x28
 	RTM_NEWTFILTER                       = 0x2c
-	RTM_NR_FAMILIES                      = 0x14
-	RTM_NR_MSGTYPES                      = 0x50
+	RTM_NR_FAMILIES                      = 0x15
+	RTM_NR_MSGTYPES                      = 0x54
 	RTM_SETDCB                           = 0x4f
 	RTM_SETLINK                          = 0x13
 	RTM_SETNEIGHTBL                      = 0x43
@@ -1376,6 +1413,7 @@
 	RTNH_F_OFFLOAD                       = 0x8
 	RTNH_F_ONLINK                        = 0x4
 	RTNH_F_PERVASIVE                     = 0x2
+	RTNH_F_UNRESOLVED                    = 0x20
 	RTN_MAX                              = 0xb
 	RTPROT_BABEL                         = 0x2a
 	RTPROT_BIRD                          = 0xc
@@ -1406,6 +1444,7 @@
 	SCM_TIMESTAMP                        = 0x1d
 	SCM_TIMESTAMPING                     = 0x25
 	SCM_TIMESTAMPING_OPT_STATS           = 0x36
+	SCM_TIMESTAMPING_PKTINFO             = 0x3a
 	SCM_TIMESTAMPNS                      = 0x23
 	SCM_WIFI_STATUS                      = 0x29
 	SECCOMP_MODE_DISABLED                = 0x0
@@ -1544,6 +1583,7 @@
 	SO_BSDCOMPAT                         = 0xe
 	SO_BUSY_POLL                         = 0x2e
 	SO_CNX_ADVICE                        = 0x35
+	SO_COOKIE                            = 0x39
 	SO_DEBUG                             = 0x1
 	SO_DETACH_BPF                        = 0x1b
 	SO_DETACH_FILTER                     = 0x1b
@@ -1552,11 +1592,13 @@
 	SO_ERROR                             = 0x4
 	SO_GET_FILTER                        = 0x1a
 	SO_INCOMING_CPU                      = 0x31
+	SO_INCOMING_NAPI_ID                  = 0x38
 	SO_KEEPALIVE                         = 0x9
 	SO_LINGER                            = 0xd
 	SO_LOCK_FILTER                       = 0x2c
 	SO_MARK                              = 0x24
 	SO_MAX_PACING_RATE                   = 0x2f
+	SO_MEMINFO                           = 0x37
 	SO_NOFCS                             = 0x2b
 	SO_NO_CHECK                          = 0xb
 	SO_OOBINLINE                         = 0xa
@@ -1564,6 +1606,7 @@
 	SO_PASSSEC                           = 0x22
 	SO_PEEK_OFF                          = 0x2a
 	SO_PEERCRED                          = 0x11
+	SO_PEERGROUPS                        = 0x3b
 	SO_PEERNAME                          = 0x1c
 	SO_PEERSEC                           = 0x1f
 	SO_PRIORITY                          = 0xc
@@ -1631,6 +1674,12 @@
 	TAB2                                 = 0x1000
 	TAB3                                 = 0x1800
 	TABDLY                               = 0x1800
+	TASKSTATS_CMD_ATTR_MAX               = 0x4
+	TASKSTATS_CMD_MAX                    = 0x2
+	TASKSTATS_GENL_NAME                  = "TASKSTATS"
+	TASKSTATS_GENL_VERSION               = 0x1
+	TASKSTATS_TYPE_MAX                   = 0x6
+	TASKSTATS_VERSION                    = 0x8
 	TCFLSH                               = 0x540b
 	TCGETA                               = 0x5405
 	TCGETS                               = 0x5401
@@ -1654,6 +1703,7 @@
 	TCP_CORK                             = 0x3
 	TCP_DEFER_ACCEPT                     = 0x9
 	TCP_FASTOPEN                         = 0x17
+	TCP_FASTOPEN_CONNECT                 = 0x1e
 	TCP_INFO                             = 0xb
 	TCP_KEEPCNT                          = 0x6
 	TCP_KEEPIDLE                         = 0x4
@@ -1713,6 +1763,7 @@
 	TIOCGPKT                             = 0x80045438
 	TIOCGPTLCK                           = 0x80045439
 	TIOCGPTN                             = 0x80045430
+	TIOCGPTPEER                          = 0x5441
 	TIOCGRS485                           = 0x542e
 	TIOCGSERIAL                          = 0x541e
 	TIOCGSID                             = 0x5429
@@ -1770,6 +1821,7 @@
 	TIOCSWINSZ                           = 0x5414
 	TIOCVHANGUP                          = 0x5437
 	TOSTOP                               = 0x100
+	TS_COMM_LEN                          = 0x20
 	TUNATTACHFILTER                      = 0x400854d5
 	TUNDETACHFILTER                      = 0x400854d6
 	TUNGETFEATURES                       = 0x800454cf
@@ -1795,6 +1847,8 @@
 	TUNSETVNETHDRSZ                      = 0x400454d8
 	TUNSETVNETLE                         = 0x400454dc
 	UMOUNT_NOFOLLOW                      = 0x8
+	UTIME_NOW                            = 0x3fffffff
+	UTIME_OMIT                           = 0x3ffffffe
 	VDISCARD                             = 0xd
 	VEOF                                 = 0x4
 	VEOL                                 = 0xb
@@ -1824,6 +1878,17 @@
 	WALL                                 = 0x40000000
 	WCLONE                               = 0x80000000
 	WCONTINUED                           = 0x8
+	WDIOC_GETBOOTSTATUS                  = 0x80045702
+	WDIOC_GETPRETIMEOUT                  = 0x80045709
+	WDIOC_GETSTATUS                      = 0x80045701
+	WDIOC_GETSUPPORT                     = 0x80285700
+	WDIOC_GETTEMP                        = 0x80045703
+	WDIOC_GETTIMELEFT                    = 0x8004570a
+	WDIOC_GETTIMEOUT                     = 0x80045707
+	WDIOC_KEEPALIVE                      = 0x80045705
+	WDIOC_SETOPTIONS                     = 0x80045704
+	WDIOC_SETPRETIMEOUT                  = 0xc0045708
+	WDIOC_SETTIMEOUT                     = 0xc0045706
 	WEXITED                              = 0x4
 	WNOHANG                              = 0x1
 	WNOTHREAD                            = 0x20000000
@@ -2004,7 +2069,6 @@
 	SIGTSTP   = syscall.Signal(0x14)
 	SIGTTIN   = syscall.Signal(0x15)
 	SIGTTOU   = syscall.Signal(0x16)
-	SIGUNUSED = syscall.Signal(0x1f)
 	SIGURG    = syscall.Signal(0x17)
 	SIGUSR1   = syscall.Signal(0xa)
 	SIGUSR2   = syscall.Signal(0xc)
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 495f13b..b6e5b09 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -36,7 +36,7 @@
 	AF_KEY                               = 0xf
 	AF_LLC                               = 0x1a
 	AF_LOCAL                             = 0x1
-	AF_MAX                               = 0x2b
+	AF_MAX                               = 0x2c
 	AF_MPLS                              = 0x1c
 	AF_NETBEUI                           = 0xd
 	AF_NETLINK                           = 0x10
@@ -51,6 +51,7 @@
 	AF_ROUTE                             = 0x10
 	AF_RXRPC                             = 0x21
 	AF_SECURITY                          = 0xe
+	AF_SMC                               = 0x2b
 	AF_SNA                               = 0x16
 	AF_TIPC                              = 0x1e
 	AF_UNIX                              = 0x1
@@ -129,6 +130,7 @@
 	ARPHRD_TUNNEL                        = 0x300
 	ARPHRD_TUNNEL6                       = 0x301
 	ARPHRD_VOID                          = 0xffff
+	ARPHRD_VSOCKMON                      = 0x33a
 	ARPHRD_X25                           = 0x10f
 	B0                                   = 0x0
 	B1000000                             = 0x1008
@@ -393,6 +395,7 @@
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
 	ETH_P_HSR                            = 0x892f
+	ETH_P_IBOE                           = 0x8915
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
@@ -441,6 +444,7 @@
 	EXTA                                 = 0xe
 	EXTB                                 = 0xf
 	EXTPROC                              = 0x10000
+	EXTRA_MAGIC                          = 0x45585401
 	FALLOC_FL_COLLAPSE_RANGE             = 0x8
 	FALLOC_FL_INSERT_RANGE               = 0x20
 	FALLOC_FL_KEEP_SIZE                  = 0x1
@@ -454,6 +458,8 @@
 	FF1                                  = 0x8000
 	FFDLY                                = 0x8000
 	FLUSHO                               = 0x1000
+	FS_ENCRYPTION_MODE_AES_128_CBC       = 0x5
+	FS_ENCRYPTION_MODE_AES_128_CTS       = 0x6
 	FS_ENCRYPTION_MODE_AES_256_CBC       = 0x3
 	FS_ENCRYPTION_MODE_AES_256_CTS       = 0x4
 	FS_ENCRYPTION_MODE_AES_256_GCM       = 0x2
@@ -508,6 +514,19 @@
 	F_ULOCK                              = 0x0
 	F_UNLCK                              = 0x2
 	F_WRLCK                              = 0x1
+	GENL_ADMIN_PERM                      = 0x1
+	GENL_CMD_CAP_DO                      = 0x2
+	GENL_CMD_CAP_DUMP                    = 0x4
+	GENL_CMD_CAP_HASPOL                  = 0x8
+	GENL_HDRLEN                          = 0x4
+	GENL_ID_CTRL                         = 0x10
+	GENL_ID_PMCRAID                      = 0x12
+	GENL_ID_VFS_DQUOT                    = 0x11
+	GENL_MAX_ID                          = 0x3ff
+	GENL_MIN_ID                          = 0x10
+	GENL_NAMSIZ                          = 0x10
+	GENL_START_ALLOC                     = 0x13
+	GENL_UNS_ADMIN_PERM                  = 0x10
 	GRND_NONBLOCK                        = 0x1
 	GRND_RANDOM                          = 0x2
 	HUPCL                                = 0x400
@@ -645,8 +664,10 @@
 	IPV6_2292PKTOPTIONS                  = 0x6
 	IPV6_2292RTHDR                       = 0x5
 	IPV6_ADDRFORM                        = 0x1
+	IPV6_ADDR_PREFERENCES                = 0x48
 	IPV6_ADD_MEMBERSHIP                  = 0x14
 	IPV6_AUTHHDR                         = 0xa
+	IPV6_AUTOFLOWLABEL                   = 0x46
 	IPV6_CHECKSUM                        = 0x7
 	IPV6_DONTFRAG                        = 0x3e
 	IPV6_DROP_MEMBERSHIP                 = 0x15
@@ -659,12 +680,14 @@
 	IPV6_JOIN_GROUP                      = 0x14
 	IPV6_LEAVE_ANYCAST                   = 0x1c
 	IPV6_LEAVE_GROUP                     = 0x15
+	IPV6_MINHOPCOUNT                     = 0x49
 	IPV6_MTU                             = 0x18
 	IPV6_MTU_DISCOVER                    = 0x17
 	IPV6_MULTICAST_HOPS                  = 0x12
 	IPV6_MULTICAST_IF                    = 0x11
 	IPV6_MULTICAST_LOOP                  = 0x13
 	IPV6_NEXTHOP                         = 0x9
+	IPV6_ORIGDSTADDR                     = 0x4a
 	IPV6_PATHMTU                         = 0x3d
 	IPV6_PKTINFO                         = 0x32
 	IPV6_PMTUDISC_DO                     = 0x2
@@ -675,8 +698,10 @@
 	IPV6_PMTUDISC_WANT                   = 0x1
 	IPV6_RECVDSTOPTS                     = 0x3a
 	IPV6_RECVERR                         = 0x19
+	IPV6_RECVFRAGSIZE                    = 0x4d
 	IPV6_RECVHOPLIMIT                    = 0x33
 	IPV6_RECVHOPOPTS                     = 0x35
+	IPV6_RECVORIGDSTADDR                 = 0x4a
 	IPV6_RECVPATHMTU                     = 0x3c
 	IPV6_RECVPKTINFO                     = 0x31
 	IPV6_RECVRTHDR                       = 0x38
@@ -690,7 +715,9 @@
 	IPV6_RXDSTOPTS                       = 0x3b
 	IPV6_RXHOPOPTS                       = 0x36
 	IPV6_TCLASS                          = 0x43
+	IPV6_TRANSPARENT                     = 0x4b
 	IPV6_UNICAST_HOPS                    = 0x10
+	IPV6_UNICAST_IF                      = 0x4c
 	IPV6_V6ONLY                          = 0x1a
 	IPV6_XFRM_POLICY                     = 0x23
 	IP_ADD_MEMBERSHIP                    = 0x23
@@ -733,6 +760,7 @@
 	IP_PMTUDISC_PROBE                    = 0x3
 	IP_PMTUDISC_WANT                     = 0x1
 	IP_RECVERR                           = 0xb
+	IP_RECVFRAGSIZE                      = 0x19
 	IP_RECVOPTS                          = 0x6
 	IP_RECVORIGDSTADDR                   = 0x14
 	IP_RECVRETOPTS                       = 0x7
@@ -770,6 +798,7 @@
 	KEYCTL_NEGATE                        = 0xd
 	KEYCTL_READ                          = 0xb
 	KEYCTL_REJECT                        = 0x13
+	KEYCTL_RESTRICT_KEYRING              = 0x1d
 	KEYCTL_REVOKE                        = 0x3
 	KEYCTL_SEARCH                        = 0xa
 	KEYCTL_SESSION_TO_PARENT             = 0x12
@@ -902,6 +931,7 @@
 	MS_SILENT                            = 0x8000
 	MS_SLAVE                             = 0x80000
 	MS_STRICTATIME                       = 0x1000000
+	MS_SUBMOUNT                          = 0x4000000
 	MS_SYNC                              = 0x4
 	MS_SYNCHRONOUS                       = 0x10
 	MS_UNBINDABLE                        = 0x20000
@@ -916,6 +946,7 @@
 	NETLINK_DNRTMSG                      = 0xe
 	NETLINK_DROP_MEMBERSHIP              = 0x2
 	NETLINK_ECRYPTFS                     = 0x13
+	NETLINK_EXT_ACK                      = 0xb
 	NETLINK_FIB_LOOKUP                   = 0xa
 	NETLINK_FIREWALL                     = 0x3
 	NETLINK_GENERIC                      = 0x10
@@ -934,6 +965,7 @@
 	NETLINK_RX_RING                      = 0x6
 	NETLINK_SCSITRANSPORT                = 0x12
 	NETLINK_SELINUX                      = 0x7
+	NETLINK_SMC                          = 0x16
 	NETLINK_SOCK_DIAG                    = 0x4
 	NETLINK_TX_RING                      = 0x7
 	NETLINK_UNUSED                       = 0x1
@@ -954,8 +986,10 @@
 	NLMSG_NOOP                           = 0x1
 	NLMSG_OVERRUN                        = 0x4
 	NLM_F_ACK                            = 0x4
+	NLM_F_ACK_TLVS                       = 0x200
 	NLM_F_APPEND                         = 0x800
 	NLM_F_ATOMIC                         = 0x400
+	NLM_F_CAPPED                         = 0x100
 	NLM_F_CREATE                         = 0x400
 	NLM_F_DUMP                           = 0x300
 	NLM_F_DUMP_FILTERED                  = 0x20
@@ -1012,6 +1046,7 @@
 	PACKET_FANOUT_EBPF                   = 0x7
 	PACKET_FANOUT_FLAG_DEFRAG            = 0x8000
 	PACKET_FANOUT_FLAG_ROLLOVER          = 0x1000
+	PACKET_FANOUT_FLAG_UNIQUEID          = 0x2000
 	PACKET_FANOUT_HASH                   = 0x0
 	PACKET_FANOUT_LB                     = 0x1
 	PACKET_FANOUT_QM                     = 0x5
@@ -1153,7 +1188,7 @@
 	PR_SET_NO_NEW_PRIVS                  = 0x26
 	PR_SET_PDEATHSIG                     = 0x1
 	PR_SET_PTRACER                       = 0x59616d61
-	PR_SET_PTRACER_ANY                   = -0x1
+	PR_SET_PTRACER_ANY                   = 0xffffffffffffffff
 	PR_SET_SECCOMP                       = 0x16
 	PR_SET_SECUREBITS                    = 0x1c
 	PR_SET_THP_DISABLE                   = 0x29
@@ -1233,7 +1268,7 @@
 	RLIMIT_RTTIME                        = 0xf
 	RLIMIT_SIGPENDING                    = 0xb
 	RLIMIT_STACK                         = 0x3
-	RLIM_INFINITY                        = -0x1
+	RLIM_INFINITY                        = 0xffffffffffffffff
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
@@ -1258,7 +1293,7 @@
 	RTAX_UNSPEC                          = 0x0
 	RTAX_WINDOW                          = 0x3
 	RTA_ALIGNTO                          = 0x4
-	RTA_MAX                              = 0x19
+	RTA_MAX                              = 0x1a
 	RTCF_DIRECTSRC                       = 0x4000000
 	RTCF_DOREDIRECT                      = 0x1000000
 	RTCF_LOG                             = 0x2000000
@@ -1302,6 +1337,7 @@
 	RTM_DELLINK                          = 0x11
 	RTM_DELMDB                           = 0x55
 	RTM_DELNEIGH                         = 0x1d
+	RTM_DELNETCONF                       = 0x51
 	RTM_DELNSID                          = 0x59
 	RTM_DELQDISC                         = 0x25
 	RTM_DELROUTE                         = 0x19
@@ -1310,6 +1346,7 @@
 	RTM_DELTFILTER                       = 0x2d
 	RTM_F_CLONED                         = 0x200
 	RTM_F_EQUALIZE                       = 0x400
+	RTM_F_FIB_MATCH                      = 0x2000
 	RTM_F_LOOKUP_TABLE                   = 0x1000
 	RTM_F_NOTIFY                         = 0x100
 	RTM_F_PREFIX                         = 0x800
@@ -1331,10 +1368,11 @@
 	RTM_GETSTATS                         = 0x5e
 	RTM_GETTCLASS                        = 0x2a
 	RTM_GETTFILTER                       = 0x2e
-	RTM_MAX                              = 0x5f
+	RTM_MAX                              = 0x63
 	RTM_NEWACTION                        = 0x30
 	RTM_NEWADDR                          = 0x14
 	RTM_NEWADDRLABEL                     = 0x48
+	RTM_NEWCACHEREPORT                   = 0x60
 	RTM_NEWLINK                          = 0x10
 	RTM_NEWMDB                           = 0x54
 	RTM_NEWNDUSEROPT                     = 0x44
@@ -1349,8 +1387,8 @@
 	RTM_NEWSTATS                         = 0x5c
 	RTM_NEWTCLASS                        = 0x28
 	RTM_NEWTFILTER                       = 0x2c
-	RTM_NR_FAMILIES                      = 0x14
-	RTM_NR_MSGTYPES                      = 0x50
+	RTM_NR_FAMILIES                      = 0x15
+	RTM_NR_MSGTYPES                      = 0x54
 	RTM_SETDCB                           = 0x4f
 	RTM_SETLINK                          = 0x13
 	RTM_SETNEIGHTBL                      = 0x43
@@ -1361,6 +1399,7 @@
 	RTNH_F_OFFLOAD                       = 0x8
 	RTNH_F_ONLINK                        = 0x4
 	RTNH_F_PERVASIVE                     = 0x2
+	RTNH_F_UNRESOLVED                    = 0x20
 	RTN_MAX                              = 0xb
 	RTPROT_BABEL                         = 0x2a
 	RTPROT_BIRD                          = 0xc
@@ -1391,6 +1430,7 @@
 	SCM_TIMESTAMP                        = 0x1d
 	SCM_TIMESTAMPING                     = 0x25
 	SCM_TIMESTAMPING_OPT_STATS           = 0x36
+	SCM_TIMESTAMPING_PKTINFO             = 0x3a
 	SCM_TIMESTAMPNS                      = 0x23
 	SCM_WIFI_STATUS                      = 0x29
 	SECCOMP_MODE_DISABLED                = 0x0
@@ -1529,6 +1569,7 @@
 	SO_BSDCOMPAT                         = 0xe
 	SO_BUSY_POLL                         = 0x2e
 	SO_CNX_ADVICE                        = 0x35
+	SO_COOKIE                            = 0x39
 	SO_DEBUG                             = 0x1
 	SO_DETACH_BPF                        = 0x1b
 	SO_DETACH_FILTER                     = 0x1b
@@ -1537,11 +1578,13 @@
 	SO_ERROR                             = 0x4
 	SO_GET_FILTER                        = 0x1a
 	SO_INCOMING_CPU                      = 0x31
+	SO_INCOMING_NAPI_ID                  = 0x38
 	SO_KEEPALIVE                         = 0x9
 	SO_LINGER                            = 0xd
 	SO_LOCK_FILTER                       = 0x2c
 	SO_MARK                              = 0x24
 	SO_MAX_PACING_RATE                   = 0x2f
+	SO_MEMINFO                           = 0x37
 	SO_NOFCS                             = 0x2b
 	SO_NO_CHECK                          = 0xb
 	SO_OOBINLINE                         = 0xa
@@ -1549,6 +1592,7 @@
 	SO_PASSSEC                           = 0x22
 	SO_PEEK_OFF                          = 0x2a
 	SO_PEERCRED                          = 0x11
+	SO_PEERGROUPS                        = 0x3b
 	SO_PEERNAME                          = 0x1c
 	SO_PEERSEC                           = 0x1f
 	SO_PRIORITY                          = 0xc
@@ -1616,6 +1660,12 @@
 	TAB2                                 = 0x1000
 	TAB3                                 = 0x1800
 	TABDLY                               = 0x1800
+	TASKSTATS_CMD_ATTR_MAX               = 0x4
+	TASKSTATS_CMD_MAX                    = 0x2
+	TASKSTATS_GENL_NAME                  = "TASKSTATS"
+	TASKSTATS_GENL_VERSION               = 0x1
+	TASKSTATS_TYPE_MAX                   = 0x6
+	TASKSTATS_VERSION                    = 0x8
 	TCFLSH                               = 0x540b
 	TCGETA                               = 0x5405
 	TCGETS                               = 0x5401
@@ -1639,6 +1689,7 @@
 	TCP_CORK                             = 0x3
 	TCP_DEFER_ACCEPT                     = 0x9
 	TCP_FASTOPEN                         = 0x17
+	TCP_FASTOPEN_CONNECT                 = 0x1e
 	TCP_INFO                             = 0xb
 	TCP_KEEPCNT                          = 0x6
 	TCP_KEEPIDLE                         = 0x4
@@ -1698,6 +1749,7 @@
 	TIOCGPKT                             = 0x80045438
 	TIOCGPTLCK                           = 0x80045439
 	TIOCGPTN                             = 0x80045430
+	TIOCGPTPEER                          = 0x5441
 	TIOCGRS485                           = 0x542e
 	TIOCGSERIAL                          = 0x541e
 	TIOCGSID                             = 0x5429
@@ -1755,6 +1807,7 @@
 	TIOCSWINSZ                           = 0x5414
 	TIOCVHANGUP                          = 0x5437
 	TOSTOP                               = 0x100
+	TS_COMM_LEN                          = 0x20
 	TUNATTACHFILTER                      = 0x401054d5
 	TUNDETACHFILTER                      = 0x401054d6
 	TUNGETFEATURES                       = 0x800454cf
@@ -1780,6 +1833,8 @@
 	TUNSETVNETHDRSZ                      = 0x400454d8
 	TUNSETVNETLE                         = 0x400454dc
 	UMOUNT_NOFOLLOW                      = 0x8
+	UTIME_NOW                            = 0x3fffffff
+	UTIME_OMIT                           = 0x3ffffffe
 	VDISCARD                             = 0xd
 	VEOF                                 = 0x4
 	VEOL                                 = 0xb
@@ -1809,6 +1864,17 @@
 	WALL                                 = 0x40000000
 	WCLONE                               = 0x80000000
 	WCONTINUED                           = 0x8
+	WDIOC_GETBOOTSTATUS                  = 0x80045702
+	WDIOC_GETPRETIMEOUT                  = 0x80045709
+	WDIOC_GETSTATUS                      = 0x80045701
+	WDIOC_GETSUPPORT                     = 0x80285700
+	WDIOC_GETTEMP                        = 0x80045703
+	WDIOC_GETTIMELEFT                    = 0x8004570a
+	WDIOC_GETTIMEOUT                     = 0x80045707
+	WDIOC_KEEPALIVE                      = 0x80045705
+	WDIOC_SETOPTIONS                     = 0x80045704
+	WDIOC_SETPRETIMEOUT                  = 0xc0045708
+	WDIOC_SETTIMEOUT                     = 0xc0045706
 	WEXITED                              = 0x4
 	WNOHANG                              = 0x1
 	WNOTHREAD                            = 0x20000000
@@ -1989,7 +2055,6 @@
 	SIGTSTP   = syscall.Signal(0x14)
 	SIGTTIN   = syscall.Signal(0x15)
 	SIGTTOU   = syscall.Signal(0x16)
-	SIGUNUSED = syscall.Signal(0x1f)
 	SIGURG    = syscall.Signal(0x17)
 	SIGUSR1   = syscall.Signal(0xa)
 	SIGUSR2   = syscall.Signal(0xc)
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 59651e4..0113e1f 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -36,7 +36,7 @@
 	AF_KEY                               = 0xf
 	AF_LLC                               = 0x1a
 	AF_LOCAL                             = 0x1
-	AF_MAX                               = 0x2b
+	AF_MAX                               = 0x2c
 	AF_MPLS                              = 0x1c
 	AF_NETBEUI                           = 0xd
 	AF_NETLINK                           = 0x10
@@ -51,6 +51,7 @@
 	AF_ROUTE                             = 0x10
 	AF_RXRPC                             = 0x21
 	AF_SECURITY                          = 0xe
+	AF_SMC                               = 0x2b
 	AF_SNA                               = 0x16
 	AF_TIPC                              = 0x1e
 	AF_UNIX                              = 0x1
@@ -129,6 +130,7 @@
 	ARPHRD_TUNNEL                        = 0x300
 	ARPHRD_TUNNEL6                       = 0x301
 	ARPHRD_VOID                          = 0xffff
+	ARPHRD_VSOCKMON                      = 0x33a
 	ARPHRD_X25                           = 0x10f
 	B0                                   = 0x0
 	B1000000                             = 0x1008
@@ -392,6 +394,7 @@
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
 	ETH_P_HSR                            = 0x892f
+	ETH_P_IBOE                           = 0x8915
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
@@ -453,6 +456,8 @@
 	FF1                                  = 0x8000
 	FFDLY                                = 0x8000
 	FLUSHO                               = 0x2000
+	FS_ENCRYPTION_MODE_AES_128_CBC       = 0x5
+	FS_ENCRYPTION_MODE_AES_128_CTS       = 0x6
 	FS_ENCRYPTION_MODE_AES_256_CBC       = 0x3
 	FS_ENCRYPTION_MODE_AES_256_CTS       = 0x4
 	FS_ENCRYPTION_MODE_AES_256_GCM       = 0x2
@@ -507,6 +512,19 @@
 	F_ULOCK                              = 0x0
 	F_UNLCK                              = 0x2
 	F_WRLCK                              = 0x1
+	GENL_ADMIN_PERM                      = 0x1
+	GENL_CMD_CAP_DO                      = 0x2
+	GENL_CMD_CAP_DUMP                    = 0x4
+	GENL_CMD_CAP_HASPOL                  = 0x8
+	GENL_HDRLEN                          = 0x4
+	GENL_ID_CTRL                         = 0x10
+	GENL_ID_PMCRAID                      = 0x12
+	GENL_ID_VFS_DQUOT                    = 0x11
+	GENL_MAX_ID                          = 0x3ff
+	GENL_MIN_ID                          = 0x10
+	GENL_NAMSIZ                          = 0x10
+	GENL_START_ALLOC                     = 0x13
+	GENL_UNS_ADMIN_PERM                  = 0x10
 	GRND_NONBLOCK                        = 0x1
 	GRND_RANDOM                          = 0x2
 	HUPCL                                = 0x400
@@ -644,8 +662,10 @@
 	IPV6_2292PKTOPTIONS                  = 0x6
 	IPV6_2292RTHDR                       = 0x5
 	IPV6_ADDRFORM                        = 0x1
+	IPV6_ADDR_PREFERENCES                = 0x48
 	IPV6_ADD_MEMBERSHIP                  = 0x14
 	IPV6_AUTHHDR                         = 0xa
+	IPV6_AUTOFLOWLABEL                   = 0x46
 	IPV6_CHECKSUM                        = 0x7
 	IPV6_DONTFRAG                        = 0x3e
 	IPV6_DROP_MEMBERSHIP                 = 0x15
@@ -658,12 +678,14 @@
 	IPV6_JOIN_GROUP                      = 0x14
 	IPV6_LEAVE_ANYCAST                   = 0x1c
 	IPV6_LEAVE_GROUP                     = 0x15
+	IPV6_MINHOPCOUNT                     = 0x49
 	IPV6_MTU                             = 0x18
 	IPV6_MTU_DISCOVER                    = 0x17
 	IPV6_MULTICAST_HOPS                  = 0x12
 	IPV6_MULTICAST_IF                    = 0x11
 	IPV6_MULTICAST_LOOP                  = 0x13
 	IPV6_NEXTHOP                         = 0x9
+	IPV6_ORIGDSTADDR                     = 0x4a
 	IPV6_PATHMTU                         = 0x3d
 	IPV6_PKTINFO                         = 0x32
 	IPV6_PMTUDISC_DO                     = 0x2
@@ -674,8 +696,10 @@
 	IPV6_PMTUDISC_WANT                   = 0x1
 	IPV6_RECVDSTOPTS                     = 0x3a
 	IPV6_RECVERR                         = 0x19
+	IPV6_RECVFRAGSIZE                    = 0x4d
 	IPV6_RECVHOPLIMIT                    = 0x33
 	IPV6_RECVHOPOPTS                     = 0x35
+	IPV6_RECVORIGDSTADDR                 = 0x4a
 	IPV6_RECVPATHMTU                     = 0x3c
 	IPV6_RECVPKTINFO                     = 0x31
 	IPV6_RECVRTHDR                       = 0x38
@@ -689,7 +713,9 @@
 	IPV6_RXDSTOPTS                       = 0x3b
 	IPV6_RXHOPOPTS                       = 0x36
 	IPV6_TCLASS                          = 0x43
+	IPV6_TRANSPARENT                     = 0x4b
 	IPV6_UNICAST_HOPS                    = 0x10
+	IPV6_UNICAST_IF                      = 0x4c
 	IPV6_V6ONLY                          = 0x1a
 	IPV6_XFRM_POLICY                     = 0x23
 	IP_ADD_MEMBERSHIP                    = 0x23
@@ -732,6 +758,7 @@
 	IP_PMTUDISC_PROBE                    = 0x3
 	IP_PMTUDISC_WANT                     = 0x1
 	IP_RECVERR                           = 0xb
+	IP_RECVFRAGSIZE                      = 0x19
 	IP_RECVOPTS                          = 0x6
 	IP_RECVORIGDSTADDR                   = 0x14
 	IP_RECVRETOPTS                       = 0x7
@@ -769,6 +796,7 @@
 	KEYCTL_NEGATE                        = 0xd
 	KEYCTL_READ                          = 0xb
 	KEYCTL_REJECT                        = 0x13
+	KEYCTL_RESTRICT_KEYRING              = 0x1d
 	KEYCTL_REVOKE                        = 0x3
 	KEYCTL_SEARCH                        = 0xa
 	KEYCTL_SESSION_TO_PARENT             = 0x12
@@ -902,6 +930,7 @@
 	MS_SILENT                            = 0x8000
 	MS_SLAVE                             = 0x80000
 	MS_STRICTATIME                       = 0x1000000
+	MS_SUBMOUNT                          = 0x4000000
 	MS_SYNC                              = 0x4
 	MS_SYNCHRONOUS                       = 0x10
 	MS_UNBINDABLE                        = 0x20000
@@ -916,6 +945,7 @@
 	NETLINK_DNRTMSG                      = 0xe
 	NETLINK_DROP_MEMBERSHIP              = 0x2
 	NETLINK_ECRYPTFS                     = 0x13
+	NETLINK_EXT_ACK                      = 0xb
 	NETLINK_FIB_LOOKUP                   = 0xa
 	NETLINK_FIREWALL                     = 0x3
 	NETLINK_GENERIC                      = 0x10
@@ -934,6 +964,7 @@
 	NETLINK_RX_RING                      = 0x6
 	NETLINK_SCSITRANSPORT                = 0x12
 	NETLINK_SELINUX                      = 0x7
+	NETLINK_SMC                          = 0x16
 	NETLINK_SOCK_DIAG                    = 0x4
 	NETLINK_TX_RING                      = 0x7
 	NETLINK_UNUSED                       = 0x1
@@ -954,8 +985,10 @@
 	NLMSG_NOOP                           = 0x1
 	NLMSG_OVERRUN                        = 0x4
 	NLM_F_ACK                            = 0x4
+	NLM_F_ACK_TLVS                       = 0x200
 	NLM_F_APPEND                         = 0x800
 	NLM_F_ATOMIC                         = 0x400
+	NLM_F_CAPPED                         = 0x100
 	NLM_F_CREATE                         = 0x400
 	NLM_F_DUMP                           = 0x300
 	NLM_F_DUMP_FILTERED                  = 0x20
@@ -1012,6 +1045,7 @@
 	PACKET_FANOUT_EBPF                   = 0x7
 	PACKET_FANOUT_FLAG_DEFRAG            = 0x8000
 	PACKET_FANOUT_FLAG_ROLLOVER          = 0x1000
+	PACKET_FANOUT_FLAG_UNIQUEID          = 0x2000
 	PACKET_FANOUT_HASH                   = 0x0
 	PACKET_FANOUT_LB                     = 0x1
 	PACKET_FANOUT_QM                     = 0x5
@@ -1245,7 +1279,7 @@
 	RLIMIT_RTTIME                        = 0xf
 	RLIMIT_SIGPENDING                    = 0xb
 	RLIMIT_STACK                         = 0x3
-	RLIM_INFINITY                        = -0x1
+	RLIM_INFINITY                        = 0xffffffffffffffff
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
@@ -1270,7 +1304,7 @@
 	RTAX_UNSPEC                          = 0x0
 	RTAX_WINDOW                          = 0x3
 	RTA_ALIGNTO                          = 0x4
-	RTA_MAX                              = 0x19
+	RTA_MAX                              = 0x1a
 	RTCF_DIRECTSRC                       = 0x4000000
 	RTCF_DOREDIRECT                      = 0x1000000
 	RTCF_LOG                             = 0x2000000
@@ -1314,6 +1348,7 @@
 	RTM_DELLINK                          = 0x11
 	RTM_DELMDB                           = 0x55
 	RTM_DELNEIGH                         = 0x1d
+	RTM_DELNETCONF                       = 0x51
 	RTM_DELNSID                          = 0x59
 	RTM_DELQDISC                         = 0x25
 	RTM_DELROUTE                         = 0x19
@@ -1322,6 +1357,7 @@
 	RTM_DELTFILTER                       = 0x2d
 	RTM_F_CLONED                         = 0x200
 	RTM_F_EQUALIZE                       = 0x400
+	RTM_F_FIB_MATCH                      = 0x2000
 	RTM_F_LOOKUP_TABLE                   = 0x1000
 	RTM_F_NOTIFY                         = 0x100
 	RTM_F_PREFIX                         = 0x800
@@ -1343,10 +1379,11 @@
 	RTM_GETSTATS                         = 0x5e
 	RTM_GETTCLASS                        = 0x2a
 	RTM_GETTFILTER                       = 0x2e
-	RTM_MAX                              = 0x5f
+	RTM_MAX                              = 0x63
 	RTM_NEWACTION                        = 0x30
 	RTM_NEWADDR                          = 0x14
 	RTM_NEWADDRLABEL                     = 0x48
+	RTM_NEWCACHEREPORT                   = 0x60
 	RTM_NEWLINK                          = 0x10
 	RTM_NEWMDB                           = 0x54
 	RTM_NEWNDUSEROPT                     = 0x44
@@ -1361,8 +1398,8 @@
 	RTM_NEWSTATS                         = 0x5c
 	RTM_NEWTCLASS                        = 0x28
 	RTM_NEWTFILTER                       = 0x2c
-	RTM_NR_FAMILIES                      = 0x14
-	RTM_NR_MSGTYPES                      = 0x50
+	RTM_NR_FAMILIES                      = 0x15
+	RTM_NR_MSGTYPES                      = 0x54
 	RTM_SETDCB                           = 0x4f
 	RTM_SETLINK                          = 0x13
 	RTM_SETNEIGHTBL                      = 0x43
@@ -1373,6 +1410,7 @@
 	RTNH_F_OFFLOAD                       = 0x8
 	RTNH_F_ONLINK                        = 0x4
 	RTNH_F_PERVASIVE                     = 0x2
+	RTNH_F_UNRESOLVED                    = 0x20
 	RTN_MAX                              = 0xb
 	RTPROT_BABEL                         = 0x2a
 	RTPROT_BIRD                          = 0xc
@@ -1403,6 +1441,7 @@
 	SCM_TIMESTAMP                        = 0x1d
 	SCM_TIMESTAMPING                     = 0x25
 	SCM_TIMESTAMPING_OPT_STATS           = 0x36
+	SCM_TIMESTAMPING_PKTINFO             = 0x3a
 	SCM_TIMESTAMPNS                      = 0x23
 	SCM_WIFI_STATUS                      = 0x29
 	SECCOMP_MODE_DISABLED                = 0x0
@@ -1541,6 +1580,7 @@
 	SO_BSDCOMPAT                         = 0xe
 	SO_BUSY_POLL                         = 0x2e
 	SO_CNX_ADVICE                        = 0x35
+	SO_COOKIE                            = 0x39
 	SO_DEBUG                             = 0x1
 	SO_DETACH_BPF                        = 0x1b
 	SO_DETACH_FILTER                     = 0x1b
@@ -1549,11 +1589,13 @@
 	SO_ERROR                             = 0x1007
 	SO_GET_FILTER                        = 0x1a
 	SO_INCOMING_CPU                      = 0x31
+	SO_INCOMING_NAPI_ID                  = 0x38
 	SO_KEEPALIVE                         = 0x8
 	SO_LINGER                            = 0x80
 	SO_LOCK_FILTER                       = 0x2c
 	SO_MARK                              = 0x24
 	SO_MAX_PACING_RATE                   = 0x2f
+	SO_MEMINFO                           = 0x37
 	SO_NOFCS                             = 0x2b
 	SO_NO_CHECK                          = 0xb
 	SO_OOBINLINE                         = 0x100
@@ -1561,6 +1603,7 @@
 	SO_PASSSEC                           = 0x22
 	SO_PEEK_OFF                          = 0x2a
 	SO_PEERCRED                          = 0x12
+	SO_PEERGROUPS                        = 0x3b
 	SO_PEERNAME                          = 0x1c
 	SO_PEERSEC                           = 0x1e
 	SO_PRIORITY                          = 0xc
@@ -1629,6 +1672,12 @@
 	TAB2                                 = 0x1000
 	TAB3                                 = 0x1800
 	TABDLY                               = 0x1800
+	TASKSTATS_CMD_ATTR_MAX               = 0x4
+	TASKSTATS_CMD_MAX                    = 0x2
+	TASKSTATS_GENL_NAME                  = "TASKSTATS"
+	TASKSTATS_GENL_VERSION               = 0x1
+	TASKSTATS_TYPE_MAX                   = 0x6
+	TASKSTATS_VERSION                    = 0x8
 	TCFLSH                               = 0x5407
 	TCGETA                               = 0x5401
 	TCGETS                               = 0x540d
@@ -1651,6 +1700,7 @@
 	TCP_CORK                             = 0x3
 	TCP_DEFER_ACCEPT                     = 0x9
 	TCP_FASTOPEN                         = 0x17
+	TCP_FASTOPEN_CONNECT                 = 0x1e
 	TCP_INFO                             = 0xb
 	TCP_KEEPCNT                          = 0x6
 	TCP_KEEPIDLE                         = 0x4
@@ -1709,6 +1759,7 @@
 	TIOCGPKT                             = 0x40045438
 	TIOCGPTLCK                           = 0x40045439
 	TIOCGPTN                             = 0x40045430
+	TIOCGPTPEER                          = 0x20005441
 	TIOCGRS485                           = 0x4020542e
 	TIOCGSERIAL                          = 0x5484
 	TIOCGSID                             = 0x7416
@@ -1769,6 +1820,7 @@
 	TIOCSWINSZ                           = 0x80087467
 	TIOCVHANGUP                          = 0x5437
 	TOSTOP                               = 0x8000
+	TS_COMM_LEN                          = 0x20
 	TUNATTACHFILTER                      = 0x800854d5
 	TUNDETACHFILTER                      = 0x800854d6
 	TUNGETFEATURES                       = 0x400454cf
@@ -1794,6 +1846,8 @@
 	TUNSETVNETHDRSZ                      = 0x800454d8
 	TUNSETVNETLE                         = 0x800454dc
 	UMOUNT_NOFOLLOW                      = 0x8
+	UTIME_NOW                            = 0x3fffffff
+	UTIME_OMIT                           = 0x3ffffffe
 	VDISCARD                             = 0xd
 	VEOF                                 = 0x10
 	VEOL                                 = 0x11
@@ -1824,6 +1878,17 @@
 	WALL                                 = 0x40000000
 	WCLONE                               = 0x80000000
 	WCONTINUED                           = 0x8
+	WDIOC_GETBOOTSTATUS                  = 0x40045702
+	WDIOC_GETPRETIMEOUT                  = 0x40045709
+	WDIOC_GETSTATUS                      = 0x40045701
+	WDIOC_GETSUPPORT                     = 0x40285700
+	WDIOC_GETTEMP                        = 0x40045703
+	WDIOC_GETTIMELEFT                    = 0x4004570a
+	WDIOC_GETTIMEOUT                     = 0x40045707
+	WDIOC_KEEPALIVE                      = 0x40045705
+	WDIOC_SETOPTIONS                     = 0x40045704
+	WDIOC_SETPRETIMEOUT                  = 0xc0045708
+	WDIOC_SETTIMEOUT                     = 0xc0045706
 	WEXITED                              = 0x4
 	WNOHANG                              = 0x1
 	WNOTHREAD                            = 0x20000000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index a09bf9b..6857657 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -36,7 +36,7 @@
 	AF_KEY                               = 0xf
 	AF_LLC                               = 0x1a
 	AF_LOCAL                             = 0x1
-	AF_MAX                               = 0x2b
+	AF_MAX                               = 0x2c
 	AF_MPLS                              = 0x1c
 	AF_NETBEUI                           = 0xd
 	AF_NETLINK                           = 0x10
@@ -51,6 +51,7 @@
 	AF_ROUTE                             = 0x10
 	AF_RXRPC                             = 0x21
 	AF_SECURITY                          = 0xe
+	AF_SMC                               = 0x2b
 	AF_SNA                               = 0x16
 	AF_TIPC                              = 0x1e
 	AF_UNIX                              = 0x1
@@ -129,6 +130,7 @@
 	ARPHRD_TUNNEL                        = 0x300
 	ARPHRD_TUNNEL6                       = 0x301
 	ARPHRD_VOID                          = 0xffff
+	ARPHRD_VSOCKMON                      = 0x33a
 	ARPHRD_X25                           = 0x10f
 	B0                                   = 0x0
 	B1000000                             = 0x1008
@@ -392,6 +394,7 @@
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
 	ETH_P_HSR                            = 0x892f
+	ETH_P_IBOE                           = 0x8915
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
@@ -453,6 +456,8 @@
 	FF1                                  = 0x8000
 	FFDLY                                = 0x8000
 	FLUSHO                               = 0x2000
+	FS_ENCRYPTION_MODE_AES_128_CBC       = 0x5
+	FS_ENCRYPTION_MODE_AES_128_CTS       = 0x6
 	FS_ENCRYPTION_MODE_AES_256_CBC       = 0x3
 	FS_ENCRYPTION_MODE_AES_256_CTS       = 0x4
 	FS_ENCRYPTION_MODE_AES_256_GCM       = 0x2
@@ -507,6 +512,19 @@
 	F_ULOCK                              = 0x0
 	F_UNLCK                              = 0x2
 	F_WRLCK                              = 0x1
+	GENL_ADMIN_PERM                      = 0x1
+	GENL_CMD_CAP_DO                      = 0x2
+	GENL_CMD_CAP_DUMP                    = 0x4
+	GENL_CMD_CAP_HASPOL                  = 0x8
+	GENL_HDRLEN                          = 0x4
+	GENL_ID_CTRL                         = 0x10
+	GENL_ID_PMCRAID                      = 0x12
+	GENL_ID_VFS_DQUOT                    = 0x11
+	GENL_MAX_ID                          = 0x3ff
+	GENL_MIN_ID                          = 0x10
+	GENL_NAMSIZ                          = 0x10
+	GENL_START_ALLOC                     = 0x13
+	GENL_UNS_ADMIN_PERM                  = 0x10
 	GRND_NONBLOCK                        = 0x1
 	GRND_RANDOM                          = 0x2
 	HUPCL                                = 0x400
@@ -644,8 +662,10 @@
 	IPV6_2292PKTOPTIONS                  = 0x6
 	IPV6_2292RTHDR                       = 0x5
 	IPV6_ADDRFORM                        = 0x1
+	IPV6_ADDR_PREFERENCES                = 0x48
 	IPV6_ADD_MEMBERSHIP                  = 0x14
 	IPV6_AUTHHDR                         = 0xa
+	IPV6_AUTOFLOWLABEL                   = 0x46
 	IPV6_CHECKSUM                        = 0x7
 	IPV6_DONTFRAG                        = 0x3e
 	IPV6_DROP_MEMBERSHIP                 = 0x15
@@ -658,12 +678,14 @@
 	IPV6_JOIN_GROUP                      = 0x14
 	IPV6_LEAVE_ANYCAST                   = 0x1c
 	IPV6_LEAVE_GROUP                     = 0x15
+	IPV6_MINHOPCOUNT                     = 0x49
 	IPV6_MTU                             = 0x18
 	IPV6_MTU_DISCOVER                    = 0x17
 	IPV6_MULTICAST_HOPS                  = 0x12
 	IPV6_MULTICAST_IF                    = 0x11
 	IPV6_MULTICAST_LOOP                  = 0x13
 	IPV6_NEXTHOP                         = 0x9
+	IPV6_ORIGDSTADDR                     = 0x4a
 	IPV6_PATHMTU                         = 0x3d
 	IPV6_PKTINFO                         = 0x32
 	IPV6_PMTUDISC_DO                     = 0x2
@@ -674,8 +696,10 @@
 	IPV6_PMTUDISC_WANT                   = 0x1
 	IPV6_RECVDSTOPTS                     = 0x3a
 	IPV6_RECVERR                         = 0x19
+	IPV6_RECVFRAGSIZE                    = 0x4d
 	IPV6_RECVHOPLIMIT                    = 0x33
 	IPV6_RECVHOPOPTS                     = 0x35
+	IPV6_RECVORIGDSTADDR                 = 0x4a
 	IPV6_RECVPATHMTU                     = 0x3c
 	IPV6_RECVPKTINFO                     = 0x31
 	IPV6_RECVRTHDR                       = 0x38
@@ -689,7 +713,9 @@
 	IPV6_RXDSTOPTS                       = 0x3b
 	IPV6_RXHOPOPTS                       = 0x36
 	IPV6_TCLASS                          = 0x43
+	IPV6_TRANSPARENT                     = 0x4b
 	IPV6_UNICAST_HOPS                    = 0x10
+	IPV6_UNICAST_IF                      = 0x4c
 	IPV6_V6ONLY                          = 0x1a
 	IPV6_XFRM_POLICY                     = 0x23
 	IP_ADD_MEMBERSHIP                    = 0x23
@@ -732,6 +758,7 @@
 	IP_PMTUDISC_PROBE                    = 0x3
 	IP_PMTUDISC_WANT                     = 0x1
 	IP_RECVERR                           = 0xb
+	IP_RECVFRAGSIZE                      = 0x19
 	IP_RECVOPTS                          = 0x6
 	IP_RECVORIGDSTADDR                   = 0x14
 	IP_RECVRETOPTS                       = 0x7
@@ -769,6 +796,7 @@
 	KEYCTL_NEGATE                        = 0xd
 	KEYCTL_READ                          = 0xb
 	KEYCTL_REJECT                        = 0x13
+	KEYCTL_RESTRICT_KEYRING              = 0x1d
 	KEYCTL_REVOKE                        = 0x3
 	KEYCTL_SEARCH                        = 0xa
 	KEYCTL_SESSION_TO_PARENT             = 0x12
@@ -902,6 +930,7 @@
 	MS_SILENT                            = 0x8000
 	MS_SLAVE                             = 0x80000
 	MS_STRICTATIME                       = 0x1000000
+	MS_SUBMOUNT                          = 0x4000000
 	MS_SYNC                              = 0x4
 	MS_SYNCHRONOUS                       = 0x10
 	MS_UNBINDABLE                        = 0x20000
@@ -916,6 +945,7 @@
 	NETLINK_DNRTMSG                      = 0xe
 	NETLINK_DROP_MEMBERSHIP              = 0x2
 	NETLINK_ECRYPTFS                     = 0x13
+	NETLINK_EXT_ACK                      = 0xb
 	NETLINK_FIB_LOOKUP                   = 0xa
 	NETLINK_FIREWALL                     = 0x3
 	NETLINK_GENERIC                      = 0x10
@@ -934,6 +964,7 @@
 	NETLINK_RX_RING                      = 0x6
 	NETLINK_SCSITRANSPORT                = 0x12
 	NETLINK_SELINUX                      = 0x7
+	NETLINK_SMC                          = 0x16
 	NETLINK_SOCK_DIAG                    = 0x4
 	NETLINK_TX_RING                      = 0x7
 	NETLINK_UNUSED                       = 0x1
@@ -954,8 +985,10 @@
 	NLMSG_NOOP                           = 0x1
 	NLMSG_OVERRUN                        = 0x4
 	NLM_F_ACK                            = 0x4
+	NLM_F_ACK_TLVS                       = 0x200
 	NLM_F_APPEND                         = 0x800
 	NLM_F_ATOMIC                         = 0x400
+	NLM_F_CAPPED                         = 0x100
 	NLM_F_CREATE                         = 0x400
 	NLM_F_DUMP                           = 0x300
 	NLM_F_DUMP_FILTERED                  = 0x20
@@ -1012,6 +1045,7 @@
 	PACKET_FANOUT_EBPF                   = 0x7
 	PACKET_FANOUT_FLAG_DEFRAG            = 0x8000
 	PACKET_FANOUT_FLAG_ROLLOVER          = 0x1000
+	PACKET_FANOUT_FLAG_UNIQUEID          = 0x2000
 	PACKET_FANOUT_HASH                   = 0x0
 	PACKET_FANOUT_LB                     = 0x1
 	PACKET_FANOUT_QM                     = 0x5
@@ -1153,7 +1187,7 @@
 	PR_SET_NO_NEW_PRIVS                  = 0x26
 	PR_SET_PDEATHSIG                     = 0x1
 	PR_SET_PTRACER                       = 0x59616d61
-	PR_SET_PTRACER_ANY                   = -0x1
+	PR_SET_PTRACER_ANY                   = 0xffffffffffffffff
 	PR_SET_SECCOMP                       = 0x16
 	PR_SET_SECUREBITS                    = 0x1c
 	PR_SET_THP_DISABLE                   = 0x29
@@ -1245,7 +1279,7 @@
 	RLIMIT_RTTIME                        = 0xf
 	RLIMIT_SIGPENDING                    = 0xb
 	RLIMIT_STACK                         = 0x3
-	RLIM_INFINITY                        = -0x1
+	RLIM_INFINITY                        = 0xffffffffffffffff
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
@@ -1270,7 +1304,7 @@
 	RTAX_UNSPEC                          = 0x0
 	RTAX_WINDOW                          = 0x3
 	RTA_ALIGNTO                          = 0x4
-	RTA_MAX                              = 0x19
+	RTA_MAX                              = 0x1a
 	RTCF_DIRECTSRC                       = 0x4000000
 	RTCF_DOREDIRECT                      = 0x1000000
 	RTCF_LOG                             = 0x2000000
@@ -1314,6 +1348,7 @@
 	RTM_DELLINK                          = 0x11
 	RTM_DELMDB                           = 0x55
 	RTM_DELNEIGH                         = 0x1d
+	RTM_DELNETCONF                       = 0x51
 	RTM_DELNSID                          = 0x59
 	RTM_DELQDISC                         = 0x25
 	RTM_DELROUTE                         = 0x19
@@ -1322,6 +1357,7 @@
 	RTM_DELTFILTER                       = 0x2d
 	RTM_F_CLONED                         = 0x200
 	RTM_F_EQUALIZE                       = 0x400
+	RTM_F_FIB_MATCH                      = 0x2000
 	RTM_F_LOOKUP_TABLE                   = 0x1000
 	RTM_F_NOTIFY                         = 0x100
 	RTM_F_PREFIX                         = 0x800
@@ -1343,10 +1379,11 @@
 	RTM_GETSTATS                         = 0x5e
 	RTM_GETTCLASS                        = 0x2a
 	RTM_GETTFILTER                       = 0x2e
-	RTM_MAX                              = 0x5f
+	RTM_MAX                              = 0x63
 	RTM_NEWACTION                        = 0x30
 	RTM_NEWADDR                          = 0x14
 	RTM_NEWADDRLABEL                     = 0x48
+	RTM_NEWCACHEREPORT                   = 0x60
 	RTM_NEWLINK                          = 0x10
 	RTM_NEWMDB                           = 0x54
 	RTM_NEWNDUSEROPT                     = 0x44
@@ -1361,8 +1398,8 @@
 	RTM_NEWSTATS                         = 0x5c
 	RTM_NEWTCLASS                        = 0x28
 	RTM_NEWTFILTER                       = 0x2c
-	RTM_NR_FAMILIES                      = 0x14
-	RTM_NR_MSGTYPES                      = 0x50
+	RTM_NR_FAMILIES                      = 0x15
+	RTM_NR_MSGTYPES                      = 0x54
 	RTM_SETDCB                           = 0x4f
 	RTM_SETLINK                          = 0x13
 	RTM_SETNEIGHTBL                      = 0x43
@@ -1373,6 +1410,7 @@
 	RTNH_F_OFFLOAD                       = 0x8
 	RTNH_F_ONLINK                        = 0x4
 	RTNH_F_PERVASIVE                     = 0x2
+	RTNH_F_UNRESOLVED                    = 0x20
 	RTN_MAX                              = 0xb
 	RTPROT_BABEL                         = 0x2a
 	RTPROT_BIRD                          = 0xc
@@ -1403,6 +1441,7 @@
 	SCM_TIMESTAMP                        = 0x1d
 	SCM_TIMESTAMPING                     = 0x25
 	SCM_TIMESTAMPING_OPT_STATS           = 0x36
+	SCM_TIMESTAMPING_PKTINFO             = 0x3a
 	SCM_TIMESTAMPNS                      = 0x23
 	SCM_WIFI_STATUS                      = 0x29
 	SECCOMP_MODE_DISABLED                = 0x0
@@ -1541,6 +1580,7 @@
 	SO_BSDCOMPAT                         = 0xe
 	SO_BUSY_POLL                         = 0x2e
 	SO_CNX_ADVICE                        = 0x35
+	SO_COOKIE                            = 0x39
 	SO_DEBUG                             = 0x1
 	SO_DETACH_BPF                        = 0x1b
 	SO_DETACH_FILTER                     = 0x1b
@@ -1549,11 +1589,13 @@
 	SO_ERROR                             = 0x1007
 	SO_GET_FILTER                        = 0x1a
 	SO_INCOMING_CPU                      = 0x31
+	SO_INCOMING_NAPI_ID                  = 0x38
 	SO_KEEPALIVE                         = 0x8
 	SO_LINGER                            = 0x80
 	SO_LOCK_FILTER                       = 0x2c
 	SO_MARK                              = 0x24
 	SO_MAX_PACING_RATE                   = 0x2f
+	SO_MEMINFO                           = 0x37
 	SO_NOFCS                             = 0x2b
 	SO_NO_CHECK                          = 0xb
 	SO_OOBINLINE                         = 0x100
@@ -1561,6 +1603,7 @@
 	SO_PASSSEC                           = 0x22
 	SO_PEEK_OFF                          = 0x2a
 	SO_PEERCRED                          = 0x12
+	SO_PEERGROUPS                        = 0x3b
 	SO_PEERNAME                          = 0x1c
 	SO_PEERSEC                           = 0x1e
 	SO_PRIORITY                          = 0xc
@@ -1629,6 +1672,12 @@
 	TAB2                                 = 0x1000
 	TAB3                                 = 0x1800
 	TABDLY                               = 0x1800
+	TASKSTATS_CMD_ATTR_MAX               = 0x4
+	TASKSTATS_CMD_MAX                    = 0x2
+	TASKSTATS_GENL_NAME                  = "TASKSTATS"
+	TASKSTATS_GENL_VERSION               = 0x1
+	TASKSTATS_TYPE_MAX                   = 0x6
+	TASKSTATS_VERSION                    = 0x8
 	TCFLSH                               = 0x5407
 	TCGETA                               = 0x5401
 	TCGETS                               = 0x540d
@@ -1651,6 +1700,7 @@
 	TCP_CORK                             = 0x3
 	TCP_DEFER_ACCEPT                     = 0x9
 	TCP_FASTOPEN                         = 0x17
+	TCP_FASTOPEN_CONNECT                 = 0x1e
 	TCP_INFO                             = 0xb
 	TCP_KEEPCNT                          = 0x6
 	TCP_KEEPIDLE                         = 0x4
@@ -1709,6 +1759,7 @@
 	TIOCGPKT                             = 0x40045438
 	TIOCGPTLCK                           = 0x40045439
 	TIOCGPTN                             = 0x40045430
+	TIOCGPTPEER                          = 0x20005441
 	TIOCGRS485                           = 0x4020542e
 	TIOCGSERIAL                          = 0x5484
 	TIOCGSID                             = 0x7416
@@ -1769,6 +1820,7 @@
 	TIOCSWINSZ                           = 0x80087467
 	TIOCVHANGUP                          = 0x5437
 	TOSTOP                               = 0x8000
+	TS_COMM_LEN                          = 0x20
 	TUNATTACHFILTER                      = 0x801054d5
 	TUNDETACHFILTER                      = 0x801054d6
 	TUNGETFEATURES                       = 0x400454cf
@@ -1794,6 +1846,8 @@
 	TUNSETVNETHDRSZ                      = 0x800454d8
 	TUNSETVNETLE                         = 0x800454dc
 	UMOUNT_NOFOLLOW                      = 0x8
+	UTIME_NOW                            = 0x3fffffff
+	UTIME_OMIT                           = 0x3ffffffe
 	VDISCARD                             = 0xd
 	VEOF                                 = 0x10
 	VEOL                                 = 0x11
@@ -1824,6 +1878,17 @@
 	WALL                                 = 0x40000000
 	WCLONE                               = 0x80000000
 	WCONTINUED                           = 0x8
+	WDIOC_GETBOOTSTATUS                  = 0x40045702
+	WDIOC_GETPRETIMEOUT                  = 0x40045709
+	WDIOC_GETSTATUS                      = 0x40045701
+	WDIOC_GETSUPPORT                     = 0x40285700
+	WDIOC_GETTEMP                        = 0x40045703
+	WDIOC_GETTIMELEFT                    = 0x4004570a
+	WDIOC_GETTIMEOUT                     = 0x40045707
+	WDIOC_KEEPALIVE                      = 0x40045705
+	WDIOC_SETOPTIONS                     = 0x40045704
+	WDIOC_SETPRETIMEOUT                  = 0xc0045708
+	WDIOC_SETTIMEOUT                     = 0xc0045706
 	WEXITED                              = 0x4
 	WNOHANG                              = 0x1
 	WNOTHREAD                            = 0x20000000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index 72a0083..14f7e0e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -36,7 +36,7 @@
 	AF_KEY                               = 0xf
 	AF_LLC                               = 0x1a
 	AF_LOCAL                             = 0x1
-	AF_MAX                               = 0x2b
+	AF_MAX                               = 0x2c
 	AF_MPLS                              = 0x1c
 	AF_NETBEUI                           = 0xd
 	AF_NETLINK                           = 0x10
@@ -51,6 +51,7 @@
 	AF_ROUTE                             = 0x10
 	AF_RXRPC                             = 0x21
 	AF_SECURITY                          = 0xe
+	AF_SMC                               = 0x2b
 	AF_SNA                               = 0x16
 	AF_TIPC                              = 0x1e
 	AF_UNIX                              = 0x1
@@ -129,6 +130,7 @@
 	ARPHRD_TUNNEL                        = 0x300
 	ARPHRD_TUNNEL6                       = 0x301
 	ARPHRD_VOID                          = 0xffff
+	ARPHRD_VSOCKMON                      = 0x33a
 	ARPHRD_X25                           = 0x10f
 	B0                                   = 0x0
 	B1000000                             = 0x1008
@@ -392,6 +394,7 @@
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
 	ETH_P_HSR                            = 0x892f
+	ETH_P_IBOE                           = 0x8915
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
@@ -453,6 +456,8 @@
 	FF1                                  = 0x8000
 	FFDLY                                = 0x8000
 	FLUSHO                               = 0x2000
+	FS_ENCRYPTION_MODE_AES_128_CBC       = 0x5
+	FS_ENCRYPTION_MODE_AES_128_CTS       = 0x6
 	FS_ENCRYPTION_MODE_AES_256_CBC       = 0x3
 	FS_ENCRYPTION_MODE_AES_256_CTS       = 0x4
 	FS_ENCRYPTION_MODE_AES_256_GCM       = 0x2
@@ -507,6 +512,19 @@
 	F_ULOCK                              = 0x0
 	F_UNLCK                              = 0x2
 	F_WRLCK                              = 0x1
+	GENL_ADMIN_PERM                      = 0x1
+	GENL_CMD_CAP_DO                      = 0x2
+	GENL_CMD_CAP_DUMP                    = 0x4
+	GENL_CMD_CAP_HASPOL                  = 0x8
+	GENL_HDRLEN                          = 0x4
+	GENL_ID_CTRL                         = 0x10
+	GENL_ID_PMCRAID                      = 0x12
+	GENL_ID_VFS_DQUOT                    = 0x11
+	GENL_MAX_ID                          = 0x3ff
+	GENL_MIN_ID                          = 0x10
+	GENL_NAMSIZ                          = 0x10
+	GENL_START_ALLOC                     = 0x13
+	GENL_UNS_ADMIN_PERM                  = 0x10
 	GRND_NONBLOCK                        = 0x1
 	GRND_RANDOM                          = 0x2
 	HUPCL                                = 0x400
@@ -644,8 +662,10 @@
 	IPV6_2292PKTOPTIONS                  = 0x6
 	IPV6_2292RTHDR                       = 0x5
 	IPV6_ADDRFORM                        = 0x1
+	IPV6_ADDR_PREFERENCES                = 0x48
 	IPV6_ADD_MEMBERSHIP                  = 0x14
 	IPV6_AUTHHDR                         = 0xa
+	IPV6_AUTOFLOWLABEL                   = 0x46
 	IPV6_CHECKSUM                        = 0x7
 	IPV6_DONTFRAG                        = 0x3e
 	IPV6_DROP_MEMBERSHIP                 = 0x15
@@ -658,12 +678,14 @@
 	IPV6_JOIN_GROUP                      = 0x14
 	IPV6_LEAVE_ANYCAST                   = 0x1c
 	IPV6_LEAVE_GROUP                     = 0x15
+	IPV6_MINHOPCOUNT                     = 0x49
 	IPV6_MTU                             = 0x18
 	IPV6_MTU_DISCOVER                    = 0x17
 	IPV6_MULTICAST_HOPS                  = 0x12
 	IPV6_MULTICAST_IF                    = 0x11
 	IPV6_MULTICAST_LOOP                  = 0x13
 	IPV6_NEXTHOP                         = 0x9
+	IPV6_ORIGDSTADDR                     = 0x4a
 	IPV6_PATHMTU                         = 0x3d
 	IPV6_PKTINFO                         = 0x32
 	IPV6_PMTUDISC_DO                     = 0x2
@@ -674,8 +696,10 @@
 	IPV6_PMTUDISC_WANT                   = 0x1
 	IPV6_RECVDSTOPTS                     = 0x3a
 	IPV6_RECVERR                         = 0x19
+	IPV6_RECVFRAGSIZE                    = 0x4d
 	IPV6_RECVHOPLIMIT                    = 0x33
 	IPV6_RECVHOPOPTS                     = 0x35
+	IPV6_RECVORIGDSTADDR                 = 0x4a
 	IPV6_RECVPATHMTU                     = 0x3c
 	IPV6_RECVPKTINFO                     = 0x31
 	IPV6_RECVRTHDR                       = 0x38
@@ -689,7 +713,9 @@
 	IPV6_RXDSTOPTS                       = 0x3b
 	IPV6_RXHOPOPTS                       = 0x36
 	IPV6_TCLASS                          = 0x43
+	IPV6_TRANSPARENT                     = 0x4b
 	IPV6_UNICAST_HOPS                    = 0x10
+	IPV6_UNICAST_IF                      = 0x4c
 	IPV6_V6ONLY                          = 0x1a
 	IPV6_XFRM_POLICY                     = 0x23
 	IP_ADD_MEMBERSHIP                    = 0x23
@@ -732,6 +758,7 @@
 	IP_PMTUDISC_PROBE                    = 0x3
 	IP_PMTUDISC_WANT                     = 0x1
 	IP_RECVERR                           = 0xb
+	IP_RECVFRAGSIZE                      = 0x19
 	IP_RECVOPTS                          = 0x6
 	IP_RECVORIGDSTADDR                   = 0x14
 	IP_RECVRETOPTS                       = 0x7
@@ -769,6 +796,7 @@
 	KEYCTL_NEGATE                        = 0xd
 	KEYCTL_READ                          = 0xb
 	KEYCTL_REJECT                        = 0x13
+	KEYCTL_RESTRICT_KEYRING              = 0x1d
 	KEYCTL_REVOKE                        = 0x3
 	KEYCTL_SEARCH                        = 0xa
 	KEYCTL_SESSION_TO_PARENT             = 0x12
@@ -902,6 +930,7 @@
 	MS_SILENT                            = 0x8000
 	MS_SLAVE                             = 0x80000
 	MS_STRICTATIME                       = 0x1000000
+	MS_SUBMOUNT                          = 0x4000000
 	MS_SYNC                              = 0x4
 	MS_SYNCHRONOUS                       = 0x10
 	MS_UNBINDABLE                        = 0x20000
@@ -916,6 +945,7 @@
 	NETLINK_DNRTMSG                      = 0xe
 	NETLINK_DROP_MEMBERSHIP              = 0x2
 	NETLINK_ECRYPTFS                     = 0x13
+	NETLINK_EXT_ACK                      = 0xb
 	NETLINK_FIB_LOOKUP                   = 0xa
 	NETLINK_FIREWALL                     = 0x3
 	NETLINK_GENERIC                      = 0x10
@@ -934,6 +964,7 @@
 	NETLINK_RX_RING                      = 0x6
 	NETLINK_SCSITRANSPORT                = 0x12
 	NETLINK_SELINUX                      = 0x7
+	NETLINK_SMC                          = 0x16
 	NETLINK_SOCK_DIAG                    = 0x4
 	NETLINK_TX_RING                      = 0x7
 	NETLINK_UNUSED                       = 0x1
@@ -954,8 +985,10 @@
 	NLMSG_NOOP                           = 0x1
 	NLMSG_OVERRUN                        = 0x4
 	NLM_F_ACK                            = 0x4
+	NLM_F_ACK_TLVS                       = 0x200
 	NLM_F_APPEND                         = 0x800
 	NLM_F_ATOMIC                         = 0x400
+	NLM_F_CAPPED                         = 0x100
 	NLM_F_CREATE                         = 0x400
 	NLM_F_DUMP                           = 0x300
 	NLM_F_DUMP_FILTERED                  = 0x20
@@ -1012,6 +1045,7 @@
 	PACKET_FANOUT_EBPF                   = 0x7
 	PACKET_FANOUT_FLAG_DEFRAG            = 0x8000
 	PACKET_FANOUT_FLAG_ROLLOVER          = 0x1000
+	PACKET_FANOUT_FLAG_UNIQUEID          = 0x2000
 	PACKET_FANOUT_HASH                   = 0x0
 	PACKET_FANOUT_LB                     = 0x1
 	PACKET_FANOUT_QM                     = 0x5
@@ -1153,7 +1187,7 @@
 	PR_SET_NO_NEW_PRIVS                  = 0x26
 	PR_SET_PDEATHSIG                     = 0x1
 	PR_SET_PTRACER                       = 0x59616d61
-	PR_SET_PTRACER_ANY                   = -0x1
+	PR_SET_PTRACER_ANY                   = 0xffffffffffffffff
 	PR_SET_SECCOMP                       = 0x16
 	PR_SET_SECUREBITS                    = 0x1c
 	PR_SET_THP_DISABLE                   = 0x29
@@ -1245,7 +1279,7 @@
 	RLIMIT_RTTIME                        = 0xf
 	RLIMIT_SIGPENDING                    = 0xb
 	RLIMIT_STACK                         = 0x3
-	RLIM_INFINITY                        = -0x1
+	RLIM_INFINITY                        = 0xffffffffffffffff
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
@@ -1270,7 +1304,7 @@
 	RTAX_UNSPEC                          = 0x0
 	RTAX_WINDOW                          = 0x3
 	RTA_ALIGNTO                          = 0x4
-	RTA_MAX                              = 0x19
+	RTA_MAX                              = 0x1a
 	RTCF_DIRECTSRC                       = 0x4000000
 	RTCF_DOREDIRECT                      = 0x1000000
 	RTCF_LOG                             = 0x2000000
@@ -1314,6 +1348,7 @@
 	RTM_DELLINK                          = 0x11
 	RTM_DELMDB                           = 0x55
 	RTM_DELNEIGH                         = 0x1d
+	RTM_DELNETCONF                       = 0x51
 	RTM_DELNSID                          = 0x59
 	RTM_DELQDISC                         = 0x25
 	RTM_DELROUTE                         = 0x19
@@ -1322,6 +1357,7 @@
 	RTM_DELTFILTER                       = 0x2d
 	RTM_F_CLONED                         = 0x200
 	RTM_F_EQUALIZE                       = 0x400
+	RTM_F_FIB_MATCH                      = 0x2000
 	RTM_F_LOOKUP_TABLE                   = 0x1000
 	RTM_F_NOTIFY                         = 0x100
 	RTM_F_PREFIX                         = 0x800
@@ -1343,10 +1379,11 @@
 	RTM_GETSTATS                         = 0x5e
 	RTM_GETTCLASS                        = 0x2a
 	RTM_GETTFILTER                       = 0x2e
-	RTM_MAX                              = 0x5f
+	RTM_MAX                              = 0x63
 	RTM_NEWACTION                        = 0x30
 	RTM_NEWADDR                          = 0x14
 	RTM_NEWADDRLABEL                     = 0x48
+	RTM_NEWCACHEREPORT                   = 0x60
 	RTM_NEWLINK                          = 0x10
 	RTM_NEWMDB                           = 0x54
 	RTM_NEWNDUSEROPT                     = 0x44
@@ -1361,8 +1398,8 @@
 	RTM_NEWSTATS                         = 0x5c
 	RTM_NEWTCLASS                        = 0x28
 	RTM_NEWTFILTER                       = 0x2c
-	RTM_NR_FAMILIES                      = 0x14
-	RTM_NR_MSGTYPES                      = 0x50
+	RTM_NR_FAMILIES                      = 0x15
+	RTM_NR_MSGTYPES                      = 0x54
 	RTM_SETDCB                           = 0x4f
 	RTM_SETLINK                          = 0x13
 	RTM_SETNEIGHTBL                      = 0x43
@@ -1373,6 +1410,7 @@
 	RTNH_F_OFFLOAD                       = 0x8
 	RTNH_F_ONLINK                        = 0x4
 	RTNH_F_PERVASIVE                     = 0x2
+	RTNH_F_UNRESOLVED                    = 0x20
 	RTN_MAX                              = 0xb
 	RTPROT_BABEL                         = 0x2a
 	RTPROT_BIRD                          = 0xc
@@ -1403,6 +1441,7 @@
 	SCM_TIMESTAMP                        = 0x1d
 	SCM_TIMESTAMPING                     = 0x25
 	SCM_TIMESTAMPING_OPT_STATS           = 0x36
+	SCM_TIMESTAMPING_PKTINFO             = 0x3a
 	SCM_TIMESTAMPNS                      = 0x23
 	SCM_WIFI_STATUS                      = 0x29
 	SECCOMP_MODE_DISABLED                = 0x0
@@ -1541,6 +1580,7 @@
 	SO_BSDCOMPAT                         = 0xe
 	SO_BUSY_POLL                         = 0x2e
 	SO_CNX_ADVICE                        = 0x35
+	SO_COOKIE                            = 0x39
 	SO_DEBUG                             = 0x1
 	SO_DETACH_BPF                        = 0x1b
 	SO_DETACH_FILTER                     = 0x1b
@@ -1549,11 +1589,13 @@
 	SO_ERROR                             = 0x1007
 	SO_GET_FILTER                        = 0x1a
 	SO_INCOMING_CPU                      = 0x31
+	SO_INCOMING_NAPI_ID                  = 0x38
 	SO_KEEPALIVE                         = 0x8
 	SO_LINGER                            = 0x80
 	SO_LOCK_FILTER                       = 0x2c
 	SO_MARK                              = 0x24
 	SO_MAX_PACING_RATE                   = 0x2f
+	SO_MEMINFO                           = 0x37
 	SO_NOFCS                             = 0x2b
 	SO_NO_CHECK                          = 0xb
 	SO_OOBINLINE                         = 0x100
@@ -1561,6 +1603,7 @@
 	SO_PASSSEC                           = 0x22
 	SO_PEEK_OFF                          = 0x2a
 	SO_PEERCRED                          = 0x12
+	SO_PEERGROUPS                        = 0x3b
 	SO_PEERNAME                          = 0x1c
 	SO_PEERSEC                           = 0x1e
 	SO_PRIORITY                          = 0xc
@@ -1629,6 +1672,12 @@
 	TAB2                                 = 0x1000
 	TAB3                                 = 0x1800
 	TABDLY                               = 0x1800
+	TASKSTATS_CMD_ATTR_MAX               = 0x4
+	TASKSTATS_CMD_MAX                    = 0x2
+	TASKSTATS_GENL_NAME                  = "TASKSTATS"
+	TASKSTATS_GENL_VERSION               = 0x1
+	TASKSTATS_TYPE_MAX                   = 0x6
+	TASKSTATS_VERSION                    = 0x8
 	TCFLSH                               = 0x5407
 	TCGETA                               = 0x5401
 	TCGETS                               = 0x540d
@@ -1651,6 +1700,7 @@
 	TCP_CORK                             = 0x3
 	TCP_DEFER_ACCEPT                     = 0x9
 	TCP_FASTOPEN                         = 0x17
+	TCP_FASTOPEN_CONNECT                 = 0x1e
 	TCP_INFO                             = 0xb
 	TCP_KEEPCNT                          = 0x6
 	TCP_KEEPIDLE                         = 0x4
@@ -1709,6 +1759,7 @@
 	TIOCGPKT                             = 0x40045438
 	TIOCGPTLCK                           = 0x40045439
 	TIOCGPTN                             = 0x40045430
+	TIOCGPTPEER                          = 0x20005441
 	TIOCGRS485                           = 0x4020542e
 	TIOCGSERIAL                          = 0x5484
 	TIOCGSID                             = 0x7416
@@ -1769,6 +1820,7 @@
 	TIOCSWINSZ                           = 0x80087467
 	TIOCVHANGUP                          = 0x5437
 	TOSTOP                               = 0x8000
+	TS_COMM_LEN                          = 0x20
 	TUNATTACHFILTER                      = 0x801054d5
 	TUNDETACHFILTER                      = 0x801054d6
 	TUNGETFEATURES                       = 0x400454cf
@@ -1794,6 +1846,8 @@
 	TUNSETVNETHDRSZ                      = 0x800454d8
 	TUNSETVNETLE                         = 0x800454dc
 	UMOUNT_NOFOLLOW                      = 0x8
+	UTIME_NOW                            = 0x3fffffff
+	UTIME_OMIT                           = 0x3ffffffe
 	VDISCARD                             = 0xd
 	VEOF                                 = 0x10
 	VEOL                                 = 0x11
@@ -1824,6 +1878,17 @@
 	WALL                                 = 0x40000000
 	WCLONE                               = 0x80000000
 	WCONTINUED                           = 0x8
+	WDIOC_GETBOOTSTATUS                  = 0x40045702
+	WDIOC_GETPRETIMEOUT                  = 0x40045709
+	WDIOC_GETSTATUS                      = 0x40045701
+	WDIOC_GETSUPPORT                     = 0x40285700
+	WDIOC_GETTEMP                        = 0x40045703
+	WDIOC_GETTIMELEFT                    = 0x4004570a
+	WDIOC_GETTIMEOUT                     = 0x40045707
+	WDIOC_KEEPALIVE                      = 0x40045705
+	WDIOC_SETOPTIONS                     = 0x40045704
+	WDIOC_SETPRETIMEOUT                  = 0xc0045708
+	WDIOC_SETTIMEOUT                     = 0xc0045706
 	WEXITED                              = 0x4
 	WNOHANG                              = 0x1
 	WNOTHREAD                            = 0x20000000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 84c0e3c..f795862 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -36,7 +36,7 @@
 	AF_KEY                               = 0xf
 	AF_LLC                               = 0x1a
 	AF_LOCAL                             = 0x1
-	AF_MAX                               = 0x2b
+	AF_MAX                               = 0x2c
 	AF_MPLS                              = 0x1c
 	AF_NETBEUI                           = 0xd
 	AF_NETLINK                           = 0x10
@@ -51,6 +51,7 @@
 	AF_ROUTE                             = 0x10
 	AF_RXRPC                             = 0x21
 	AF_SECURITY                          = 0xe
+	AF_SMC                               = 0x2b
 	AF_SNA                               = 0x16
 	AF_TIPC                              = 0x1e
 	AF_UNIX                              = 0x1
@@ -129,6 +130,7 @@
 	ARPHRD_TUNNEL                        = 0x300
 	ARPHRD_TUNNEL6                       = 0x301
 	ARPHRD_VOID                          = 0xffff
+	ARPHRD_VSOCKMON                      = 0x33a
 	ARPHRD_X25                           = 0x10f
 	B0                                   = 0x0
 	B1000000                             = 0x1008
@@ -392,6 +394,7 @@
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
 	ETH_P_HSR                            = 0x892f
+	ETH_P_IBOE                           = 0x8915
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
@@ -453,6 +456,8 @@
 	FF1                                  = 0x8000
 	FFDLY                                = 0x8000
 	FLUSHO                               = 0x2000
+	FS_ENCRYPTION_MODE_AES_128_CBC       = 0x5
+	FS_ENCRYPTION_MODE_AES_128_CTS       = 0x6
 	FS_ENCRYPTION_MODE_AES_256_CBC       = 0x3
 	FS_ENCRYPTION_MODE_AES_256_CTS       = 0x4
 	FS_ENCRYPTION_MODE_AES_256_GCM       = 0x2
@@ -507,6 +512,19 @@
 	F_ULOCK                              = 0x0
 	F_UNLCK                              = 0x2
 	F_WRLCK                              = 0x1
+	GENL_ADMIN_PERM                      = 0x1
+	GENL_CMD_CAP_DO                      = 0x2
+	GENL_CMD_CAP_DUMP                    = 0x4
+	GENL_CMD_CAP_HASPOL                  = 0x8
+	GENL_HDRLEN                          = 0x4
+	GENL_ID_CTRL                         = 0x10
+	GENL_ID_PMCRAID                      = 0x12
+	GENL_ID_VFS_DQUOT                    = 0x11
+	GENL_MAX_ID                          = 0x3ff
+	GENL_MIN_ID                          = 0x10
+	GENL_NAMSIZ                          = 0x10
+	GENL_START_ALLOC                     = 0x13
+	GENL_UNS_ADMIN_PERM                  = 0x10
 	GRND_NONBLOCK                        = 0x1
 	GRND_RANDOM                          = 0x2
 	HUPCL                                = 0x400
@@ -644,8 +662,10 @@
 	IPV6_2292PKTOPTIONS                  = 0x6
 	IPV6_2292RTHDR                       = 0x5
 	IPV6_ADDRFORM                        = 0x1
+	IPV6_ADDR_PREFERENCES                = 0x48
 	IPV6_ADD_MEMBERSHIP                  = 0x14
 	IPV6_AUTHHDR                         = 0xa
+	IPV6_AUTOFLOWLABEL                   = 0x46
 	IPV6_CHECKSUM                        = 0x7
 	IPV6_DONTFRAG                        = 0x3e
 	IPV6_DROP_MEMBERSHIP                 = 0x15
@@ -658,12 +678,14 @@
 	IPV6_JOIN_GROUP                      = 0x14
 	IPV6_LEAVE_ANYCAST                   = 0x1c
 	IPV6_LEAVE_GROUP                     = 0x15
+	IPV6_MINHOPCOUNT                     = 0x49
 	IPV6_MTU                             = 0x18
 	IPV6_MTU_DISCOVER                    = 0x17
 	IPV6_MULTICAST_HOPS                  = 0x12
 	IPV6_MULTICAST_IF                    = 0x11
 	IPV6_MULTICAST_LOOP                  = 0x13
 	IPV6_NEXTHOP                         = 0x9
+	IPV6_ORIGDSTADDR                     = 0x4a
 	IPV6_PATHMTU                         = 0x3d
 	IPV6_PKTINFO                         = 0x32
 	IPV6_PMTUDISC_DO                     = 0x2
@@ -674,8 +696,10 @@
 	IPV6_PMTUDISC_WANT                   = 0x1
 	IPV6_RECVDSTOPTS                     = 0x3a
 	IPV6_RECVERR                         = 0x19
+	IPV6_RECVFRAGSIZE                    = 0x4d
 	IPV6_RECVHOPLIMIT                    = 0x33
 	IPV6_RECVHOPOPTS                     = 0x35
+	IPV6_RECVORIGDSTADDR                 = 0x4a
 	IPV6_RECVPATHMTU                     = 0x3c
 	IPV6_RECVPKTINFO                     = 0x31
 	IPV6_RECVRTHDR                       = 0x38
@@ -689,7 +713,9 @@
 	IPV6_RXDSTOPTS                       = 0x3b
 	IPV6_RXHOPOPTS                       = 0x36
 	IPV6_TCLASS                          = 0x43
+	IPV6_TRANSPARENT                     = 0x4b
 	IPV6_UNICAST_HOPS                    = 0x10
+	IPV6_UNICAST_IF                      = 0x4c
 	IPV6_V6ONLY                          = 0x1a
 	IPV6_XFRM_POLICY                     = 0x23
 	IP_ADD_MEMBERSHIP                    = 0x23
@@ -732,6 +758,7 @@
 	IP_PMTUDISC_PROBE                    = 0x3
 	IP_PMTUDISC_WANT                     = 0x1
 	IP_RECVERR                           = 0xb
+	IP_RECVFRAGSIZE                      = 0x19
 	IP_RECVOPTS                          = 0x6
 	IP_RECVORIGDSTADDR                   = 0x14
 	IP_RECVRETOPTS                       = 0x7
@@ -769,6 +796,7 @@
 	KEYCTL_NEGATE                        = 0xd
 	KEYCTL_READ                          = 0xb
 	KEYCTL_REJECT                        = 0x13
+	KEYCTL_RESTRICT_KEYRING              = 0x1d
 	KEYCTL_REVOKE                        = 0x3
 	KEYCTL_SEARCH                        = 0xa
 	KEYCTL_SESSION_TO_PARENT             = 0x12
@@ -902,6 +930,7 @@
 	MS_SILENT                            = 0x8000
 	MS_SLAVE                             = 0x80000
 	MS_STRICTATIME                       = 0x1000000
+	MS_SUBMOUNT                          = 0x4000000
 	MS_SYNC                              = 0x4
 	MS_SYNCHRONOUS                       = 0x10
 	MS_UNBINDABLE                        = 0x20000
@@ -916,6 +945,7 @@
 	NETLINK_DNRTMSG                      = 0xe
 	NETLINK_DROP_MEMBERSHIP              = 0x2
 	NETLINK_ECRYPTFS                     = 0x13
+	NETLINK_EXT_ACK                      = 0xb
 	NETLINK_FIB_LOOKUP                   = 0xa
 	NETLINK_FIREWALL                     = 0x3
 	NETLINK_GENERIC                      = 0x10
@@ -934,6 +964,7 @@
 	NETLINK_RX_RING                      = 0x6
 	NETLINK_SCSITRANSPORT                = 0x12
 	NETLINK_SELINUX                      = 0x7
+	NETLINK_SMC                          = 0x16
 	NETLINK_SOCK_DIAG                    = 0x4
 	NETLINK_TX_RING                      = 0x7
 	NETLINK_UNUSED                       = 0x1
@@ -954,8 +985,10 @@
 	NLMSG_NOOP                           = 0x1
 	NLMSG_OVERRUN                        = 0x4
 	NLM_F_ACK                            = 0x4
+	NLM_F_ACK_TLVS                       = 0x200
 	NLM_F_APPEND                         = 0x800
 	NLM_F_ATOMIC                         = 0x400
+	NLM_F_CAPPED                         = 0x100
 	NLM_F_CREATE                         = 0x400
 	NLM_F_DUMP                           = 0x300
 	NLM_F_DUMP_FILTERED                  = 0x20
@@ -1012,6 +1045,7 @@
 	PACKET_FANOUT_EBPF                   = 0x7
 	PACKET_FANOUT_FLAG_DEFRAG            = 0x8000
 	PACKET_FANOUT_FLAG_ROLLOVER          = 0x1000
+	PACKET_FANOUT_FLAG_UNIQUEID          = 0x2000
 	PACKET_FANOUT_HASH                   = 0x0
 	PACKET_FANOUT_LB                     = 0x1
 	PACKET_FANOUT_QM                     = 0x5
@@ -1245,7 +1279,7 @@
 	RLIMIT_RTTIME                        = 0xf
 	RLIMIT_SIGPENDING                    = 0xb
 	RLIMIT_STACK                         = 0x3
-	RLIM_INFINITY                        = -0x1
+	RLIM_INFINITY                        = 0xffffffffffffffff
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
@@ -1270,7 +1304,7 @@
 	RTAX_UNSPEC                          = 0x0
 	RTAX_WINDOW                          = 0x3
 	RTA_ALIGNTO                          = 0x4
-	RTA_MAX                              = 0x19
+	RTA_MAX                              = 0x1a
 	RTCF_DIRECTSRC                       = 0x4000000
 	RTCF_DOREDIRECT                      = 0x1000000
 	RTCF_LOG                             = 0x2000000
@@ -1314,6 +1348,7 @@
 	RTM_DELLINK                          = 0x11
 	RTM_DELMDB                           = 0x55
 	RTM_DELNEIGH                         = 0x1d
+	RTM_DELNETCONF                       = 0x51
 	RTM_DELNSID                          = 0x59
 	RTM_DELQDISC                         = 0x25
 	RTM_DELROUTE                         = 0x19
@@ -1322,6 +1357,7 @@
 	RTM_DELTFILTER                       = 0x2d
 	RTM_F_CLONED                         = 0x200
 	RTM_F_EQUALIZE                       = 0x400
+	RTM_F_FIB_MATCH                      = 0x2000
 	RTM_F_LOOKUP_TABLE                   = 0x1000
 	RTM_F_NOTIFY                         = 0x100
 	RTM_F_PREFIX                         = 0x800
@@ -1343,10 +1379,11 @@
 	RTM_GETSTATS                         = 0x5e
 	RTM_GETTCLASS                        = 0x2a
 	RTM_GETTFILTER                       = 0x2e
-	RTM_MAX                              = 0x5f
+	RTM_MAX                              = 0x63
 	RTM_NEWACTION                        = 0x30
 	RTM_NEWADDR                          = 0x14
 	RTM_NEWADDRLABEL                     = 0x48
+	RTM_NEWCACHEREPORT                   = 0x60
 	RTM_NEWLINK                          = 0x10
 	RTM_NEWMDB                           = 0x54
 	RTM_NEWNDUSEROPT                     = 0x44
@@ -1361,8 +1398,8 @@
 	RTM_NEWSTATS                         = 0x5c
 	RTM_NEWTCLASS                        = 0x28
 	RTM_NEWTFILTER                       = 0x2c
-	RTM_NR_FAMILIES                      = 0x14
-	RTM_NR_MSGTYPES                      = 0x50
+	RTM_NR_FAMILIES                      = 0x15
+	RTM_NR_MSGTYPES                      = 0x54
 	RTM_SETDCB                           = 0x4f
 	RTM_SETLINK                          = 0x13
 	RTM_SETNEIGHTBL                      = 0x43
@@ -1373,6 +1410,7 @@
 	RTNH_F_OFFLOAD                       = 0x8
 	RTNH_F_ONLINK                        = 0x4
 	RTNH_F_PERVASIVE                     = 0x2
+	RTNH_F_UNRESOLVED                    = 0x20
 	RTN_MAX                              = 0xb
 	RTPROT_BABEL                         = 0x2a
 	RTPROT_BIRD                          = 0xc
@@ -1403,6 +1441,7 @@
 	SCM_TIMESTAMP                        = 0x1d
 	SCM_TIMESTAMPING                     = 0x25
 	SCM_TIMESTAMPING_OPT_STATS           = 0x36
+	SCM_TIMESTAMPING_PKTINFO             = 0x3a
 	SCM_TIMESTAMPNS                      = 0x23
 	SCM_WIFI_STATUS                      = 0x29
 	SECCOMP_MODE_DISABLED                = 0x0
@@ -1541,6 +1580,7 @@
 	SO_BSDCOMPAT                         = 0xe
 	SO_BUSY_POLL                         = 0x2e
 	SO_CNX_ADVICE                        = 0x35
+	SO_COOKIE                            = 0x39
 	SO_DEBUG                             = 0x1
 	SO_DETACH_BPF                        = 0x1b
 	SO_DETACH_FILTER                     = 0x1b
@@ -1549,11 +1589,13 @@
 	SO_ERROR                             = 0x1007
 	SO_GET_FILTER                        = 0x1a
 	SO_INCOMING_CPU                      = 0x31
+	SO_INCOMING_NAPI_ID                  = 0x38
 	SO_KEEPALIVE                         = 0x8
 	SO_LINGER                            = 0x80
 	SO_LOCK_FILTER                       = 0x2c
 	SO_MARK                              = 0x24
 	SO_MAX_PACING_RATE                   = 0x2f
+	SO_MEMINFO                           = 0x37
 	SO_NOFCS                             = 0x2b
 	SO_NO_CHECK                          = 0xb
 	SO_OOBINLINE                         = 0x100
@@ -1561,6 +1603,7 @@
 	SO_PASSSEC                           = 0x22
 	SO_PEEK_OFF                          = 0x2a
 	SO_PEERCRED                          = 0x12
+	SO_PEERGROUPS                        = 0x3b
 	SO_PEERNAME                          = 0x1c
 	SO_PEERSEC                           = 0x1e
 	SO_PRIORITY                          = 0xc
@@ -1629,6 +1672,12 @@
 	TAB2                                 = 0x1000
 	TAB3                                 = 0x1800
 	TABDLY                               = 0x1800
+	TASKSTATS_CMD_ATTR_MAX               = 0x4
+	TASKSTATS_CMD_MAX                    = 0x2
+	TASKSTATS_GENL_NAME                  = "TASKSTATS"
+	TASKSTATS_GENL_VERSION               = 0x1
+	TASKSTATS_TYPE_MAX                   = 0x6
+	TASKSTATS_VERSION                    = 0x8
 	TCFLSH                               = 0x5407
 	TCGETA                               = 0x5401
 	TCGETS                               = 0x540d
@@ -1651,6 +1700,7 @@
 	TCP_CORK                             = 0x3
 	TCP_DEFER_ACCEPT                     = 0x9
 	TCP_FASTOPEN                         = 0x17
+	TCP_FASTOPEN_CONNECT                 = 0x1e
 	TCP_INFO                             = 0xb
 	TCP_KEEPCNT                          = 0x6
 	TCP_KEEPIDLE                         = 0x4
@@ -1709,6 +1759,7 @@
 	TIOCGPKT                             = 0x40045438
 	TIOCGPTLCK                           = 0x40045439
 	TIOCGPTN                             = 0x40045430
+	TIOCGPTPEER                          = 0x20005441
 	TIOCGRS485                           = 0x4020542e
 	TIOCGSERIAL                          = 0x5484
 	TIOCGSID                             = 0x7416
@@ -1769,6 +1820,7 @@
 	TIOCSWINSZ                           = 0x80087467
 	TIOCVHANGUP                          = 0x5437
 	TOSTOP                               = 0x8000
+	TS_COMM_LEN                          = 0x20
 	TUNATTACHFILTER                      = 0x800854d5
 	TUNDETACHFILTER                      = 0x800854d6
 	TUNGETFEATURES                       = 0x400454cf
@@ -1794,6 +1846,8 @@
 	TUNSETVNETHDRSZ                      = 0x800454d8
 	TUNSETVNETLE                         = 0x800454dc
 	UMOUNT_NOFOLLOW                      = 0x8
+	UTIME_NOW                            = 0x3fffffff
+	UTIME_OMIT                           = 0x3ffffffe
 	VDISCARD                             = 0xd
 	VEOF                                 = 0x10
 	VEOL                                 = 0x11
@@ -1824,6 +1878,17 @@
 	WALL                                 = 0x40000000
 	WCLONE                               = 0x80000000
 	WCONTINUED                           = 0x8
+	WDIOC_GETBOOTSTATUS                  = 0x40045702
+	WDIOC_GETPRETIMEOUT                  = 0x40045709
+	WDIOC_GETSTATUS                      = 0x40045701
+	WDIOC_GETSUPPORT                     = 0x40285700
+	WDIOC_GETTEMP                        = 0x40045703
+	WDIOC_GETTIMELEFT                    = 0x4004570a
+	WDIOC_GETTIMEOUT                     = 0x40045707
+	WDIOC_KEEPALIVE                      = 0x40045705
+	WDIOC_SETOPTIONS                     = 0x40045704
+	WDIOC_SETPRETIMEOUT                  = 0xc0045708
+	WDIOC_SETTIMEOUT                     = 0xc0045706
 	WEXITED                              = 0x4
 	WNOHANG                              = 0x1
 	WNOTHREAD                            = 0x20000000
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index 8e4606e..2544c4b 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -36,7 +36,7 @@
 	AF_KEY                               = 0xf
 	AF_LLC                               = 0x1a
 	AF_LOCAL                             = 0x1
-	AF_MAX                               = 0x2b
+	AF_MAX                               = 0x2c
 	AF_MPLS                              = 0x1c
 	AF_NETBEUI                           = 0xd
 	AF_NETLINK                           = 0x10
@@ -51,6 +51,7 @@
 	AF_ROUTE                             = 0x10
 	AF_RXRPC                             = 0x21
 	AF_SECURITY                          = 0xe
+	AF_SMC                               = 0x2b
 	AF_SNA                               = 0x16
 	AF_TIPC                              = 0x1e
 	AF_UNIX                              = 0x1
@@ -129,6 +130,7 @@
 	ARPHRD_TUNNEL                        = 0x300
 	ARPHRD_TUNNEL6                       = 0x301
 	ARPHRD_VOID                          = 0xffff
+	ARPHRD_VSOCKMON                      = 0x33a
 	ARPHRD_X25                           = 0x10f
 	B0                                   = 0x0
 	B1000000                             = 0x17
@@ -392,6 +394,7 @@
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
 	ETH_P_HSR                            = 0x892f
+	ETH_P_IBOE                           = 0x8915
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
@@ -453,6 +456,8 @@
 	FF1                                  = 0x4000
 	FFDLY                                = 0x4000
 	FLUSHO                               = 0x800000
+	FS_ENCRYPTION_MODE_AES_128_CBC       = 0x5
+	FS_ENCRYPTION_MODE_AES_128_CTS       = 0x6
 	FS_ENCRYPTION_MODE_AES_256_CBC       = 0x3
 	FS_ENCRYPTION_MODE_AES_256_CTS       = 0x4
 	FS_ENCRYPTION_MODE_AES_256_GCM       = 0x2
@@ -507,6 +512,19 @@
 	F_ULOCK                              = 0x0
 	F_UNLCK                              = 0x2
 	F_WRLCK                              = 0x1
+	GENL_ADMIN_PERM                      = 0x1
+	GENL_CMD_CAP_DO                      = 0x2
+	GENL_CMD_CAP_DUMP                    = 0x4
+	GENL_CMD_CAP_HASPOL                  = 0x8
+	GENL_HDRLEN                          = 0x4
+	GENL_ID_CTRL                         = 0x10
+	GENL_ID_PMCRAID                      = 0x12
+	GENL_ID_VFS_DQUOT                    = 0x11
+	GENL_MAX_ID                          = 0x3ff
+	GENL_MIN_ID                          = 0x10
+	GENL_NAMSIZ                          = 0x10
+	GENL_START_ALLOC                     = 0x13
+	GENL_UNS_ADMIN_PERM                  = 0x10
 	GRND_NONBLOCK                        = 0x1
 	GRND_RANDOM                          = 0x2
 	HUPCL                                = 0x4000
@@ -644,8 +662,10 @@
 	IPV6_2292PKTOPTIONS                  = 0x6
 	IPV6_2292RTHDR                       = 0x5
 	IPV6_ADDRFORM                        = 0x1
+	IPV6_ADDR_PREFERENCES                = 0x48
 	IPV6_ADD_MEMBERSHIP                  = 0x14
 	IPV6_AUTHHDR                         = 0xa
+	IPV6_AUTOFLOWLABEL                   = 0x46
 	IPV6_CHECKSUM                        = 0x7
 	IPV6_DONTFRAG                        = 0x3e
 	IPV6_DROP_MEMBERSHIP                 = 0x15
@@ -658,12 +678,14 @@
 	IPV6_JOIN_GROUP                      = 0x14
 	IPV6_LEAVE_ANYCAST                   = 0x1c
 	IPV6_LEAVE_GROUP                     = 0x15
+	IPV6_MINHOPCOUNT                     = 0x49
 	IPV6_MTU                             = 0x18
 	IPV6_MTU_DISCOVER                    = 0x17
 	IPV6_MULTICAST_HOPS                  = 0x12
 	IPV6_MULTICAST_IF                    = 0x11
 	IPV6_MULTICAST_LOOP                  = 0x13
 	IPV6_NEXTHOP                         = 0x9
+	IPV6_ORIGDSTADDR                     = 0x4a
 	IPV6_PATHMTU                         = 0x3d
 	IPV6_PKTINFO                         = 0x32
 	IPV6_PMTUDISC_DO                     = 0x2
@@ -674,8 +696,10 @@
 	IPV6_PMTUDISC_WANT                   = 0x1
 	IPV6_RECVDSTOPTS                     = 0x3a
 	IPV6_RECVERR                         = 0x19
+	IPV6_RECVFRAGSIZE                    = 0x4d
 	IPV6_RECVHOPLIMIT                    = 0x33
 	IPV6_RECVHOPOPTS                     = 0x35
+	IPV6_RECVORIGDSTADDR                 = 0x4a
 	IPV6_RECVPATHMTU                     = 0x3c
 	IPV6_RECVPKTINFO                     = 0x31
 	IPV6_RECVRTHDR                       = 0x38
@@ -689,7 +713,9 @@
 	IPV6_RXDSTOPTS                       = 0x3b
 	IPV6_RXHOPOPTS                       = 0x36
 	IPV6_TCLASS                          = 0x43
+	IPV6_TRANSPARENT                     = 0x4b
 	IPV6_UNICAST_HOPS                    = 0x10
+	IPV6_UNICAST_IF                      = 0x4c
 	IPV6_V6ONLY                          = 0x1a
 	IPV6_XFRM_POLICY                     = 0x23
 	IP_ADD_MEMBERSHIP                    = 0x23
@@ -732,6 +758,7 @@
 	IP_PMTUDISC_PROBE                    = 0x3
 	IP_PMTUDISC_WANT                     = 0x1
 	IP_RECVERR                           = 0xb
+	IP_RECVFRAGSIZE                      = 0x19
 	IP_RECVOPTS                          = 0x6
 	IP_RECVORIGDSTADDR                   = 0x14
 	IP_RECVRETOPTS                       = 0x7
@@ -769,6 +796,7 @@
 	KEYCTL_NEGATE                        = 0xd
 	KEYCTL_READ                          = 0xb
 	KEYCTL_REJECT                        = 0x13
+	KEYCTL_RESTRICT_KEYRING              = 0x1d
 	KEYCTL_REVOKE                        = 0x3
 	KEYCTL_SEARCH                        = 0xa
 	KEYCTL_SESSION_TO_PARENT             = 0x12
@@ -901,6 +929,7 @@
 	MS_SILENT                            = 0x8000
 	MS_SLAVE                             = 0x80000
 	MS_STRICTATIME                       = 0x1000000
+	MS_SUBMOUNT                          = 0x4000000
 	MS_SYNC                              = 0x4
 	MS_SYNCHRONOUS                       = 0x10
 	MS_UNBINDABLE                        = 0x20000
@@ -915,6 +944,7 @@
 	NETLINK_DNRTMSG                      = 0xe
 	NETLINK_DROP_MEMBERSHIP              = 0x2
 	NETLINK_ECRYPTFS                     = 0x13
+	NETLINK_EXT_ACK                      = 0xb
 	NETLINK_FIB_LOOKUP                   = 0xa
 	NETLINK_FIREWALL                     = 0x3
 	NETLINK_GENERIC                      = 0x10
@@ -933,6 +963,7 @@
 	NETLINK_RX_RING                      = 0x6
 	NETLINK_SCSITRANSPORT                = 0x12
 	NETLINK_SELINUX                      = 0x7
+	NETLINK_SMC                          = 0x16
 	NETLINK_SOCK_DIAG                    = 0x4
 	NETLINK_TX_RING                      = 0x7
 	NETLINK_UNUSED                       = 0x1
@@ -955,8 +986,10 @@
 	NLMSG_NOOP                           = 0x1
 	NLMSG_OVERRUN                        = 0x4
 	NLM_F_ACK                            = 0x4
+	NLM_F_ACK_TLVS                       = 0x200
 	NLM_F_APPEND                         = 0x800
 	NLM_F_ATOMIC                         = 0x400
+	NLM_F_CAPPED                         = 0x100
 	NLM_F_CREATE                         = 0x400
 	NLM_F_DUMP                           = 0x300
 	NLM_F_DUMP_FILTERED                  = 0x20
@@ -1013,6 +1046,7 @@
 	PACKET_FANOUT_EBPF                   = 0x7
 	PACKET_FANOUT_FLAG_DEFRAG            = 0x8000
 	PACKET_FANOUT_FLAG_ROLLOVER          = 0x1000
+	PACKET_FANOUT_FLAG_UNIQUEID          = 0x2000
 	PACKET_FANOUT_HASH                   = 0x0
 	PACKET_FANOUT_LB                     = 0x1
 	PACKET_FANOUT_QM                     = 0x5
@@ -1155,7 +1189,7 @@
 	PR_SET_NO_NEW_PRIVS                  = 0x26
 	PR_SET_PDEATHSIG                     = 0x1
 	PR_SET_PTRACER                       = 0x59616d61
-	PR_SET_PTRACER_ANY                   = -0x1
+	PR_SET_PTRACER_ANY                   = 0xffffffffffffffff
 	PR_SET_SECCOMP                       = 0x16
 	PR_SET_SECUREBITS                    = 0x1c
 	PR_SET_THP_DISABLE                   = 0x29
@@ -1301,7 +1335,7 @@
 	RLIMIT_RTTIME                        = 0xf
 	RLIMIT_SIGPENDING                    = 0xb
 	RLIMIT_STACK                         = 0x3
-	RLIM_INFINITY                        = -0x1
+	RLIM_INFINITY                        = 0xffffffffffffffff
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
@@ -1326,7 +1360,7 @@
 	RTAX_UNSPEC                          = 0x0
 	RTAX_WINDOW                          = 0x3
 	RTA_ALIGNTO                          = 0x4
-	RTA_MAX                              = 0x19
+	RTA_MAX                              = 0x1a
 	RTCF_DIRECTSRC                       = 0x4000000
 	RTCF_DOREDIRECT                      = 0x1000000
 	RTCF_LOG                             = 0x2000000
@@ -1370,6 +1404,7 @@
 	RTM_DELLINK                          = 0x11
 	RTM_DELMDB                           = 0x55
 	RTM_DELNEIGH                         = 0x1d
+	RTM_DELNETCONF                       = 0x51
 	RTM_DELNSID                          = 0x59
 	RTM_DELQDISC                         = 0x25
 	RTM_DELROUTE                         = 0x19
@@ -1378,6 +1413,7 @@
 	RTM_DELTFILTER                       = 0x2d
 	RTM_F_CLONED                         = 0x200
 	RTM_F_EQUALIZE                       = 0x400
+	RTM_F_FIB_MATCH                      = 0x2000
 	RTM_F_LOOKUP_TABLE                   = 0x1000
 	RTM_F_NOTIFY                         = 0x100
 	RTM_F_PREFIX                         = 0x800
@@ -1399,10 +1435,11 @@
 	RTM_GETSTATS                         = 0x5e
 	RTM_GETTCLASS                        = 0x2a
 	RTM_GETTFILTER                       = 0x2e
-	RTM_MAX                              = 0x5f
+	RTM_MAX                              = 0x63
 	RTM_NEWACTION                        = 0x30
 	RTM_NEWADDR                          = 0x14
 	RTM_NEWADDRLABEL                     = 0x48
+	RTM_NEWCACHEREPORT                   = 0x60
 	RTM_NEWLINK                          = 0x10
 	RTM_NEWMDB                           = 0x54
 	RTM_NEWNDUSEROPT                     = 0x44
@@ -1417,8 +1454,8 @@
 	RTM_NEWSTATS                         = 0x5c
 	RTM_NEWTCLASS                        = 0x28
 	RTM_NEWTFILTER                       = 0x2c
-	RTM_NR_FAMILIES                      = 0x14
-	RTM_NR_MSGTYPES                      = 0x50
+	RTM_NR_FAMILIES                      = 0x15
+	RTM_NR_MSGTYPES                      = 0x54
 	RTM_SETDCB                           = 0x4f
 	RTM_SETLINK                          = 0x13
 	RTM_SETNEIGHTBL                      = 0x43
@@ -1429,6 +1466,7 @@
 	RTNH_F_OFFLOAD                       = 0x8
 	RTNH_F_ONLINK                        = 0x4
 	RTNH_F_PERVASIVE                     = 0x2
+	RTNH_F_UNRESOLVED                    = 0x20
 	RTN_MAX                              = 0xb
 	RTPROT_BABEL                         = 0x2a
 	RTPROT_BIRD                          = 0xc
@@ -1459,6 +1497,7 @@
 	SCM_TIMESTAMP                        = 0x1d
 	SCM_TIMESTAMPING                     = 0x25
 	SCM_TIMESTAMPING_OPT_STATS           = 0x36
+	SCM_TIMESTAMPING_PKTINFO             = 0x3a
 	SCM_TIMESTAMPNS                      = 0x23
 	SCM_WIFI_STATUS                      = 0x29
 	SECCOMP_MODE_DISABLED                = 0x0
@@ -1597,6 +1636,7 @@
 	SO_BSDCOMPAT                         = 0xe
 	SO_BUSY_POLL                         = 0x2e
 	SO_CNX_ADVICE                        = 0x35
+	SO_COOKIE                            = 0x39
 	SO_DEBUG                             = 0x1
 	SO_DETACH_BPF                        = 0x1b
 	SO_DETACH_FILTER                     = 0x1b
@@ -1605,11 +1645,13 @@
 	SO_ERROR                             = 0x4
 	SO_GET_FILTER                        = 0x1a
 	SO_INCOMING_CPU                      = 0x31
+	SO_INCOMING_NAPI_ID                  = 0x38
 	SO_KEEPALIVE                         = 0x9
 	SO_LINGER                            = 0xd
 	SO_LOCK_FILTER                       = 0x2c
 	SO_MARK                              = 0x24
 	SO_MAX_PACING_RATE                   = 0x2f
+	SO_MEMINFO                           = 0x37
 	SO_NOFCS                             = 0x2b
 	SO_NO_CHECK                          = 0xb
 	SO_OOBINLINE                         = 0xa
@@ -1617,6 +1659,7 @@
 	SO_PASSSEC                           = 0x22
 	SO_PEEK_OFF                          = 0x2a
 	SO_PEERCRED                          = 0x15
+	SO_PEERGROUPS                        = 0x3b
 	SO_PEERNAME                          = 0x1c
 	SO_PEERSEC                           = 0x1f
 	SO_PRIORITY                          = 0xc
@@ -1684,6 +1727,12 @@
 	TAB2                                 = 0x800
 	TAB3                                 = 0xc00
 	TABDLY                               = 0xc00
+	TASKSTATS_CMD_ATTR_MAX               = 0x4
+	TASKSTATS_CMD_MAX                    = 0x2
+	TASKSTATS_GENL_NAME                  = "TASKSTATS"
+	TASKSTATS_GENL_VERSION               = 0x1
+	TASKSTATS_TYPE_MAX                   = 0x6
+	TASKSTATS_VERSION                    = 0x8
 	TCFLSH                               = 0x2000741f
 	TCGETA                               = 0x40147417
 	TCGETS                               = 0x402c7413
@@ -1705,6 +1754,7 @@
 	TCP_CORK                             = 0x3
 	TCP_DEFER_ACCEPT                     = 0x9
 	TCP_FASTOPEN                         = 0x17
+	TCP_FASTOPEN_CONNECT                 = 0x1e
 	TCP_INFO                             = 0xb
 	TCP_KEEPCNT                          = 0x6
 	TCP_KEEPIDLE                         = 0x4
@@ -1761,6 +1811,7 @@
 	TIOCGPKT                             = 0x40045438
 	TIOCGPTLCK                           = 0x40045439
 	TIOCGPTN                             = 0x40045430
+	TIOCGPTPEER                          = 0x20005441
 	TIOCGRS485                           = 0x542e
 	TIOCGSERIAL                          = 0x541e
 	TIOCGSID                             = 0x5429
@@ -1827,6 +1878,7 @@
 	TIOCSWINSZ                           = 0x80087467
 	TIOCVHANGUP                          = 0x5437
 	TOSTOP                               = 0x400000
+	TS_COMM_LEN                          = 0x20
 	TUNATTACHFILTER                      = 0x801054d5
 	TUNDETACHFILTER                      = 0x801054d6
 	TUNGETFEATURES                       = 0x400454cf
@@ -1852,6 +1904,8 @@
 	TUNSETVNETHDRSZ                      = 0x800454d8
 	TUNSETVNETLE                         = 0x800454dc
 	UMOUNT_NOFOLLOW                      = 0x8
+	UTIME_NOW                            = 0x3fffffff
+	UTIME_OMIT                           = 0x3ffffffe
 	VDISCARD                             = 0x10
 	VEOF                                 = 0x4
 	VEOL                                 = 0x6
@@ -1881,6 +1935,17 @@
 	WALL                                 = 0x40000000
 	WCLONE                               = 0x80000000
 	WCONTINUED                           = 0x8
+	WDIOC_GETBOOTSTATUS                  = 0x40045702
+	WDIOC_GETPRETIMEOUT                  = 0x40045709
+	WDIOC_GETSTATUS                      = 0x40045701
+	WDIOC_GETSUPPORT                     = 0x40285700
+	WDIOC_GETTEMP                        = 0x40045703
+	WDIOC_GETTIMELEFT                    = 0x4004570a
+	WDIOC_GETTIMEOUT                     = 0x40045707
+	WDIOC_KEEPALIVE                      = 0x40045705
+	WDIOC_SETOPTIONS                     = 0x40045704
+	WDIOC_SETPRETIMEOUT                  = 0xc0045708
+	WDIOC_SETTIMEOUT                     = 0xc0045706
 	WEXITED                              = 0x4
 	WNOHANG                              = 0x1
 	WNOTHREAD                            = 0x20000000
@@ -2061,7 +2126,6 @@
 	SIGTSTP   = syscall.Signal(0x14)
 	SIGTTIN   = syscall.Signal(0x15)
 	SIGTTOU   = syscall.Signal(0x16)
-	SIGUNUSED = syscall.Signal(0x1f)
 	SIGURG    = syscall.Signal(0x17)
 	SIGUSR1   = syscall.Signal(0xa)
 	SIGUSR2   = syscall.Signal(0xc)
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 16ed193..133bdf5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -36,7 +36,7 @@
 	AF_KEY                               = 0xf
 	AF_LLC                               = 0x1a
 	AF_LOCAL                             = 0x1
-	AF_MAX                               = 0x2b
+	AF_MAX                               = 0x2c
 	AF_MPLS                              = 0x1c
 	AF_NETBEUI                           = 0xd
 	AF_NETLINK                           = 0x10
@@ -51,6 +51,7 @@
 	AF_ROUTE                             = 0x10
 	AF_RXRPC                             = 0x21
 	AF_SECURITY                          = 0xe
+	AF_SMC                               = 0x2b
 	AF_SNA                               = 0x16
 	AF_TIPC                              = 0x1e
 	AF_UNIX                              = 0x1
@@ -129,6 +130,7 @@
 	ARPHRD_TUNNEL                        = 0x300
 	ARPHRD_TUNNEL6                       = 0x301
 	ARPHRD_VOID                          = 0xffff
+	ARPHRD_VSOCKMON                      = 0x33a
 	ARPHRD_X25                           = 0x10f
 	B0                                   = 0x0
 	B1000000                             = 0x17
@@ -392,6 +394,7 @@
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
 	ETH_P_HSR                            = 0x892f
+	ETH_P_IBOE                           = 0x8915
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
@@ -453,6 +456,8 @@
 	FF1                                  = 0x4000
 	FFDLY                                = 0x4000
 	FLUSHO                               = 0x800000
+	FS_ENCRYPTION_MODE_AES_128_CBC       = 0x5
+	FS_ENCRYPTION_MODE_AES_128_CTS       = 0x6
 	FS_ENCRYPTION_MODE_AES_256_CBC       = 0x3
 	FS_ENCRYPTION_MODE_AES_256_CTS       = 0x4
 	FS_ENCRYPTION_MODE_AES_256_GCM       = 0x2
@@ -507,6 +512,19 @@
 	F_ULOCK                              = 0x0
 	F_UNLCK                              = 0x2
 	F_WRLCK                              = 0x1
+	GENL_ADMIN_PERM                      = 0x1
+	GENL_CMD_CAP_DO                      = 0x2
+	GENL_CMD_CAP_DUMP                    = 0x4
+	GENL_CMD_CAP_HASPOL                  = 0x8
+	GENL_HDRLEN                          = 0x4
+	GENL_ID_CTRL                         = 0x10
+	GENL_ID_PMCRAID                      = 0x12
+	GENL_ID_VFS_DQUOT                    = 0x11
+	GENL_MAX_ID                          = 0x3ff
+	GENL_MIN_ID                          = 0x10
+	GENL_NAMSIZ                          = 0x10
+	GENL_START_ALLOC                     = 0x13
+	GENL_UNS_ADMIN_PERM                  = 0x10
 	GRND_NONBLOCK                        = 0x1
 	GRND_RANDOM                          = 0x2
 	HUPCL                                = 0x4000
@@ -644,8 +662,10 @@
 	IPV6_2292PKTOPTIONS                  = 0x6
 	IPV6_2292RTHDR                       = 0x5
 	IPV6_ADDRFORM                        = 0x1
+	IPV6_ADDR_PREFERENCES                = 0x48
 	IPV6_ADD_MEMBERSHIP                  = 0x14
 	IPV6_AUTHHDR                         = 0xa
+	IPV6_AUTOFLOWLABEL                   = 0x46
 	IPV6_CHECKSUM                        = 0x7
 	IPV6_DONTFRAG                        = 0x3e
 	IPV6_DROP_MEMBERSHIP                 = 0x15
@@ -658,12 +678,14 @@
 	IPV6_JOIN_GROUP                      = 0x14
 	IPV6_LEAVE_ANYCAST                   = 0x1c
 	IPV6_LEAVE_GROUP                     = 0x15
+	IPV6_MINHOPCOUNT                     = 0x49
 	IPV6_MTU                             = 0x18
 	IPV6_MTU_DISCOVER                    = 0x17
 	IPV6_MULTICAST_HOPS                  = 0x12
 	IPV6_MULTICAST_IF                    = 0x11
 	IPV6_MULTICAST_LOOP                  = 0x13
 	IPV6_NEXTHOP                         = 0x9
+	IPV6_ORIGDSTADDR                     = 0x4a
 	IPV6_PATHMTU                         = 0x3d
 	IPV6_PKTINFO                         = 0x32
 	IPV6_PMTUDISC_DO                     = 0x2
@@ -674,8 +696,10 @@
 	IPV6_PMTUDISC_WANT                   = 0x1
 	IPV6_RECVDSTOPTS                     = 0x3a
 	IPV6_RECVERR                         = 0x19
+	IPV6_RECVFRAGSIZE                    = 0x4d
 	IPV6_RECVHOPLIMIT                    = 0x33
 	IPV6_RECVHOPOPTS                     = 0x35
+	IPV6_RECVORIGDSTADDR                 = 0x4a
 	IPV6_RECVPATHMTU                     = 0x3c
 	IPV6_RECVPKTINFO                     = 0x31
 	IPV6_RECVRTHDR                       = 0x38
@@ -689,7 +713,9 @@
 	IPV6_RXDSTOPTS                       = 0x3b
 	IPV6_RXHOPOPTS                       = 0x36
 	IPV6_TCLASS                          = 0x43
+	IPV6_TRANSPARENT                     = 0x4b
 	IPV6_UNICAST_HOPS                    = 0x10
+	IPV6_UNICAST_IF                      = 0x4c
 	IPV6_V6ONLY                          = 0x1a
 	IPV6_XFRM_POLICY                     = 0x23
 	IP_ADD_MEMBERSHIP                    = 0x23
@@ -732,6 +758,7 @@
 	IP_PMTUDISC_PROBE                    = 0x3
 	IP_PMTUDISC_WANT                     = 0x1
 	IP_RECVERR                           = 0xb
+	IP_RECVFRAGSIZE                      = 0x19
 	IP_RECVOPTS                          = 0x6
 	IP_RECVORIGDSTADDR                   = 0x14
 	IP_RECVRETOPTS                       = 0x7
@@ -769,6 +796,7 @@
 	KEYCTL_NEGATE                        = 0xd
 	KEYCTL_READ                          = 0xb
 	KEYCTL_REJECT                        = 0x13
+	KEYCTL_RESTRICT_KEYRING              = 0x1d
 	KEYCTL_REVOKE                        = 0x3
 	KEYCTL_SEARCH                        = 0xa
 	KEYCTL_SESSION_TO_PARENT             = 0x12
@@ -901,6 +929,7 @@
 	MS_SILENT                            = 0x8000
 	MS_SLAVE                             = 0x80000
 	MS_STRICTATIME                       = 0x1000000
+	MS_SUBMOUNT                          = 0x4000000
 	MS_SYNC                              = 0x4
 	MS_SYNCHRONOUS                       = 0x10
 	MS_UNBINDABLE                        = 0x20000
@@ -915,6 +944,7 @@
 	NETLINK_DNRTMSG                      = 0xe
 	NETLINK_DROP_MEMBERSHIP              = 0x2
 	NETLINK_ECRYPTFS                     = 0x13
+	NETLINK_EXT_ACK                      = 0xb
 	NETLINK_FIB_LOOKUP                   = 0xa
 	NETLINK_FIREWALL                     = 0x3
 	NETLINK_GENERIC                      = 0x10
@@ -933,6 +963,7 @@
 	NETLINK_RX_RING                      = 0x6
 	NETLINK_SCSITRANSPORT                = 0x12
 	NETLINK_SELINUX                      = 0x7
+	NETLINK_SMC                          = 0x16
 	NETLINK_SOCK_DIAG                    = 0x4
 	NETLINK_TX_RING                      = 0x7
 	NETLINK_UNUSED                       = 0x1
@@ -955,8 +986,10 @@
 	NLMSG_NOOP                           = 0x1
 	NLMSG_OVERRUN                        = 0x4
 	NLM_F_ACK                            = 0x4
+	NLM_F_ACK_TLVS                       = 0x200
 	NLM_F_APPEND                         = 0x800
 	NLM_F_ATOMIC                         = 0x400
+	NLM_F_CAPPED                         = 0x100
 	NLM_F_CREATE                         = 0x400
 	NLM_F_DUMP                           = 0x300
 	NLM_F_DUMP_FILTERED                  = 0x20
@@ -1013,6 +1046,7 @@
 	PACKET_FANOUT_EBPF                   = 0x7
 	PACKET_FANOUT_FLAG_DEFRAG            = 0x8000
 	PACKET_FANOUT_FLAG_ROLLOVER          = 0x1000
+	PACKET_FANOUT_FLAG_UNIQUEID          = 0x2000
 	PACKET_FANOUT_HASH                   = 0x0
 	PACKET_FANOUT_LB                     = 0x1
 	PACKET_FANOUT_QM                     = 0x5
@@ -1155,7 +1189,7 @@
 	PR_SET_NO_NEW_PRIVS                  = 0x26
 	PR_SET_PDEATHSIG                     = 0x1
 	PR_SET_PTRACER                       = 0x59616d61
-	PR_SET_PTRACER_ANY                   = -0x1
+	PR_SET_PTRACER_ANY                   = 0xffffffffffffffff
 	PR_SET_SECCOMP                       = 0x16
 	PR_SET_SECUREBITS                    = 0x1c
 	PR_SET_THP_DISABLE                   = 0x29
@@ -1301,7 +1335,7 @@
 	RLIMIT_RTTIME                        = 0xf
 	RLIMIT_SIGPENDING                    = 0xb
 	RLIMIT_STACK                         = 0x3
-	RLIM_INFINITY                        = -0x1
+	RLIM_INFINITY                        = 0xffffffffffffffff
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
@@ -1326,7 +1360,7 @@
 	RTAX_UNSPEC                          = 0x0
 	RTAX_WINDOW                          = 0x3
 	RTA_ALIGNTO                          = 0x4
-	RTA_MAX                              = 0x19
+	RTA_MAX                              = 0x1a
 	RTCF_DIRECTSRC                       = 0x4000000
 	RTCF_DOREDIRECT                      = 0x1000000
 	RTCF_LOG                             = 0x2000000
@@ -1370,6 +1404,7 @@
 	RTM_DELLINK                          = 0x11
 	RTM_DELMDB                           = 0x55
 	RTM_DELNEIGH                         = 0x1d
+	RTM_DELNETCONF                       = 0x51
 	RTM_DELNSID                          = 0x59
 	RTM_DELQDISC                         = 0x25
 	RTM_DELROUTE                         = 0x19
@@ -1378,6 +1413,7 @@
 	RTM_DELTFILTER                       = 0x2d
 	RTM_F_CLONED                         = 0x200
 	RTM_F_EQUALIZE                       = 0x400
+	RTM_F_FIB_MATCH                      = 0x2000
 	RTM_F_LOOKUP_TABLE                   = 0x1000
 	RTM_F_NOTIFY                         = 0x100
 	RTM_F_PREFIX                         = 0x800
@@ -1399,10 +1435,11 @@
 	RTM_GETSTATS                         = 0x5e
 	RTM_GETTCLASS                        = 0x2a
 	RTM_GETTFILTER                       = 0x2e
-	RTM_MAX                              = 0x5f
+	RTM_MAX                              = 0x63
 	RTM_NEWACTION                        = 0x30
 	RTM_NEWADDR                          = 0x14
 	RTM_NEWADDRLABEL                     = 0x48
+	RTM_NEWCACHEREPORT                   = 0x60
 	RTM_NEWLINK                          = 0x10
 	RTM_NEWMDB                           = 0x54
 	RTM_NEWNDUSEROPT                     = 0x44
@@ -1417,8 +1454,8 @@
 	RTM_NEWSTATS                         = 0x5c
 	RTM_NEWTCLASS                        = 0x28
 	RTM_NEWTFILTER                       = 0x2c
-	RTM_NR_FAMILIES                      = 0x14
-	RTM_NR_MSGTYPES                      = 0x50
+	RTM_NR_FAMILIES                      = 0x15
+	RTM_NR_MSGTYPES                      = 0x54
 	RTM_SETDCB                           = 0x4f
 	RTM_SETLINK                          = 0x13
 	RTM_SETNEIGHTBL                      = 0x43
@@ -1429,6 +1466,7 @@
 	RTNH_F_OFFLOAD                       = 0x8
 	RTNH_F_ONLINK                        = 0x4
 	RTNH_F_PERVASIVE                     = 0x2
+	RTNH_F_UNRESOLVED                    = 0x20
 	RTN_MAX                              = 0xb
 	RTPROT_BABEL                         = 0x2a
 	RTPROT_BIRD                          = 0xc
@@ -1459,6 +1497,7 @@
 	SCM_TIMESTAMP                        = 0x1d
 	SCM_TIMESTAMPING                     = 0x25
 	SCM_TIMESTAMPING_OPT_STATS           = 0x36
+	SCM_TIMESTAMPING_PKTINFO             = 0x3a
 	SCM_TIMESTAMPNS                      = 0x23
 	SCM_WIFI_STATUS                      = 0x29
 	SECCOMP_MODE_DISABLED                = 0x0
@@ -1597,6 +1636,7 @@
 	SO_BSDCOMPAT                         = 0xe
 	SO_BUSY_POLL                         = 0x2e
 	SO_CNX_ADVICE                        = 0x35
+	SO_COOKIE                            = 0x39
 	SO_DEBUG                             = 0x1
 	SO_DETACH_BPF                        = 0x1b
 	SO_DETACH_FILTER                     = 0x1b
@@ -1605,11 +1645,13 @@
 	SO_ERROR                             = 0x4
 	SO_GET_FILTER                        = 0x1a
 	SO_INCOMING_CPU                      = 0x31
+	SO_INCOMING_NAPI_ID                  = 0x38
 	SO_KEEPALIVE                         = 0x9
 	SO_LINGER                            = 0xd
 	SO_LOCK_FILTER                       = 0x2c
 	SO_MARK                              = 0x24
 	SO_MAX_PACING_RATE                   = 0x2f
+	SO_MEMINFO                           = 0x37
 	SO_NOFCS                             = 0x2b
 	SO_NO_CHECK                          = 0xb
 	SO_OOBINLINE                         = 0xa
@@ -1617,6 +1659,7 @@
 	SO_PASSSEC                           = 0x22
 	SO_PEEK_OFF                          = 0x2a
 	SO_PEERCRED                          = 0x15
+	SO_PEERGROUPS                        = 0x3b
 	SO_PEERNAME                          = 0x1c
 	SO_PEERSEC                           = 0x1f
 	SO_PRIORITY                          = 0xc
@@ -1684,6 +1727,12 @@
 	TAB2                                 = 0x800
 	TAB3                                 = 0xc00
 	TABDLY                               = 0xc00
+	TASKSTATS_CMD_ATTR_MAX               = 0x4
+	TASKSTATS_CMD_MAX                    = 0x2
+	TASKSTATS_GENL_NAME                  = "TASKSTATS"
+	TASKSTATS_GENL_VERSION               = 0x1
+	TASKSTATS_TYPE_MAX                   = 0x6
+	TASKSTATS_VERSION                    = 0x8
 	TCFLSH                               = 0x2000741f
 	TCGETA                               = 0x40147417
 	TCGETS                               = 0x402c7413
@@ -1705,6 +1754,7 @@
 	TCP_CORK                             = 0x3
 	TCP_DEFER_ACCEPT                     = 0x9
 	TCP_FASTOPEN                         = 0x17
+	TCP_FASTOPEN_CONNECT                 = 0x1e
 	TCP_INFO                             = 0xb
 	TCP_KEEPCNT                          = 0x6
 	TCP_KEEPIDLE                         = 0x4
@@ -1761,6 +1811,7 @@
 	TIOCGPKT                             = 0x40045438
 	TIOCGPTLCK                           = 0x40045439
 	TIOCGPTN                             = 0x40045430
+	TIOCGPTPEER                          = 0x20005441
 	TIOCGRS485                           = 0x542e
 	TIOCGSERIAL                          = 0x541e
 	TIOCGSID                             = 0x5429
@@ -1827,6 +1878,7 @@
 	TIOCSWINSZ                           = 0x80087467
 	TIOCVHANGUP                          = 0x5437
 	TOSTOP                               = 0x400000
+	TS_COMM_LEN                          = 0x20
 	TUNATTACHFILTER                      = 0x801054d5
 	TUNDETACHFILTER                      = 0x801054d6
 	TUNGETFEATURES                       = 0x400454cf
@@ -1852,6 +1904,8 @@
 	TUNSETVNETHDRSZ                      = 0x800454d8
 	TUNSETVNETLE                         = 0x800454dc
 	UMOUNT_NOFOLLOW                      = 0x8
+	UTIME_NOW                            = 0x3fffffff
+	UTIME_OMIT                           = 0x3ffffffe
 	VDISCARD                             = 0x10
 	VEOF                                 = 0x4
 	VEOL                                 = 0x6
@@ -1881,6 +1935,17 @@
 	WALL                                 = 0x40000000
 	WCLONE                               = 0x80000000
 	WCONTINUED                           = 0x8
+	WDIOC_GETBOOTSTATUS                  = 0x40045702
+	WDIOC_GETPRETIMEOUT                  = 0x40045709
+	WDIOC_GETSTATUS                      = 0x40045701
+	WDIOC_GETSUPPORT                     = 0x40285700
+	WDIOC_GETTEMP                        = 0x40045703
+	WDIOC_GETTIMELEFT                    = 0x4004570a
+	WDIOC_GETTIMEOUT                     = 0x40045707
+	WDIOC_KEEPALIVE                      = 0x40045705
+	WDIOC_SETOPTIONS                     = 0x40045704
+	WDIOC_SETPRETIMEOUT                  = 0xc0045708
+	WDIOC_SETTIMEOUT                     = 0xc0045706
 	WEXITED                              = 0x4
 	WNOHANG                              = 0x1
 	WNOTHREAD                            = 0x20000000
@@ -2061,7 +2126,6 @@
 	SIGTSTP   = syscall.Signal(0x14)
 	SIGTTIN   = syscall.Signal(0x15)
 	SIGTTOU   = syscall.Signal(0x16)
-	SIGUNUSED = syscall.Signal(0x1f)
 	SIGURG    = syscall.Signal(0x17)
 	SIGUSR1   = syscall.Signal(0xa)
 	SIGUSR2   = syscall.Signal(0xc)
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index bd385f8..b921fb1 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -36,7 +36,7 @@
 	AF_KEY                               = 0xf
 	AF_LLC                               = 0x1a
 	AF_LOCAL                             = 0x1
-	AF_MAX                               = 0x2b
+	AF_MAX                               = 0x2c
 	AF_MPLS                              = 0x1c
 	AF_NETBEUI                           = 0xd
 	AF_NETLINK                           = 0x10
@@ -51,6 +51,7 @@
 	AF_ROUTE                             = 0x10
 	AF_RXRPC                             = 0x21
 	AF_SECURITY                          = 0xe
+	AF_SMC                               = 0x2b
 	AF_SNA                               = 0x16
 	AF_TIPC                              = 0x1e
 	AF_UNIX                              = 0x1
@@ -129,6 +130,7 @@
 	ARPHRD_TUNNEL                        = 0x300
 	ARPHRD_TUNNEL6                       = 0x301
 	ARPHRD_VOID                          = 0xffff
+	ARPHRD_VSOCKMON                      = 0x33a
 	ARPHRD_X25                           = 0x10f
 	B0                                   = 0x0
 	B1000000                             = 0x1008
@@ -392,6 +394,7 @@
 	ETH_P_FIP                            = 0x8914
 	ETH_P_HDLC                           = 0x19
 	ETH_P_HSR                            = 0x892f
+	ETH_P_IBOE                           = 0x8915
 	ETH_P_IEEE802154                     = 0xf6
 	ETH_P_IEEEPUP                        = 0xa00
 	ETH_P_IEEEPUPAT                      = 0xa01
@@ -453,6 +456,8 @@
 	FF1                                  = 0x8000
 	FFDLY                                = 0x8000
 	FLUSHO                               = 0x1000
+	FS_ENCRYPTION_MODE_AES_128_CBC       = 0x5
+	FS_ENCRYPTION_MODE_AES_128_CTS       = 0x6
 	FS_ENCRYPTION_MODE_AES_256_CBC       = 0x3
 	FS_ENCRYPTION_MODE_AES_256_CTS       = 0x4
 	FS_ENCRYPTION_MODE_AES_256_GCM       = 0x2
@@ -507,6 +512,19 @@
 	F_ULOCK                              = 0x0
 	F_UNLCK                              = 0x2
 	F_WRLCK                              = 0x1
+	GENL_ADMIN_PERM                      = 0x1
+	GENL_CMD_CAP_DO                      = 0x2
+	GENL_CMD_CAP_DUMP                    = 0x4
+	GENL_CMD_CAP_HASPOL                  = 0x8
+	GENL_HDRLEN                          = 0x4
+	GENL_ID_CTRL                         = 0x10
+	GENL_ID_PMCRAID                      = 0x12
+	GENL_ID_VFS_DQUOT                    = 0x11
+	GENL_MAX_ID                          = 0x3ff
+	GENL_MIN_ID                          = 0x10
+	GENL_NAMSIZ                          = 0x10
+	GENL_START_ALLOC                     = 0x13
+	GENL_UNS_ADMIN_PERM                  = 0x10
 	GRND_NONBLOCK                        = 0x1
 	GRND_RANDOM                          = 0x2
 	HUPCL                                = 0x400
@@ -644,8 +662,10 @@
 	IPV6_2292PKTOPTIONS                  = 0x6
 	IPV6_2292RTHDR                       = 0x5
 	IPV6_ADDRFORM                        = 0x1
+	IPV6_ADDR_PREFERENCES                = 0x48
 	IPV6_ADD_MEMBERSHIP                  = 0x14
 	IPV6_AUTHHDR                         = 0xa
+	IPV6_AUTOFLOWLABEL                   = 0x46
 	IPV6_CHECKSUM                        = 0x7
 	IPV6_DONTFRAG                        = 0x3e
 	IPV6_DROP_MEMBERSHIP                 = 0x15
@@ -658,12 +678,14 @@
 	IPV6_JOIN_GROUP                      = 0x14
 	IPV6_LEAVE_ANYCAST                   = 0x1c
 	IPV6_LEAVE_GROUP                     = 0x15
+	IPV6_MINHOPCOUNT                     = 0x49
 	IPV6_MTU                             = 0x18
 	IPV6_MTU_DISCOVER                    = 0x17
 	IPV6_MULTICAST_HOPS                  = 0x12
 	IPV6_MULTICAST_IF                    = 0x11
 	IPV6_MULTICAST_LOOP                  = 0x13
 	IPV6_NEXTHOP                         = 0x9
+	IPV6_ORIGDSTADDR                     = 0x4a
 	IPV6_PATHMTU                         = 0x3d
 	IPV6_PKTINFO                         = 0x32
 	IPV6_PMTUDISC_DO                     = 0x2
@@ -674,8 +696,10 @@
 	IPV6_PMTUDISC_WANT                   = 0x1
 	IPV6_RECVDSTOPTS                     = 0x3a
 	IPV6_RECVERR                         = 0x19
+	IPV6_RECVFRAGSIZE                    = 0x4d
 	IPV6_RECVHOPLIMIT                    = 0x33
 	IPV6_RECVHOPOPTS                     = 0x35
+	IPV6_RECVORIGDSTADDR                 = 0x4a
 	IPV6_RECVPATHMTU                     = 0x3c
 	IPV6_RECVPKTINFO                     = 0x31
 	IPV6_RECVRTHDR                       = 0x38
@@ -689,7 +713,9 @@
 	IPV6_RXDSTOPTS                       = 0x3b
 	IPV6_RXHOPOPTS                       = 0x36
 	IPV6_TCLASS                          = 0x43
+	IPV6_TRANSPARENT                     = 0x4b
 	IPV6_UNICAST_HOPS                    = 0x10
+	IPV6_UNICAST_IF                      = 0x4c
 	IPV6_V6ONLY                          = 0x1a
 	IPV6_XFRM_POLICY                     = 0x23
 	IP_ADD_MEMBERSHIP                    = 0x23
@@ -732,6 +758,7 @@
 	IP_PMTUDISC_PROBE                    = 0x3
 	IP_PMTUDISC_WANT                     = 0x1
 	IP_RECVERR                           = 0xb
+	IP_RECVFRAGSIZE                      = 0x19
 	IP_RECVOPTS                          = 0x6
 	IP_RECVORIGDSTADDR                   = 0x14
 	IP_RECVRETOPTS                       = 0x7
@@ -769,6 +796,7 @@
 	KEYCTL_NEGATE                        = 0xd
 	KEYCTL_READ                          = 0xb
 	KEYCTL_REJECT                        = 0x13
+	KEYCTL_RESTRICT_KEYRING              = 0x1d
 	KEYCTL_REVOKE                        = 0x3
 	KEYCTL_SEARCH                        = 0xa
 	KEYCTL_SESSION_TO_PARENT             = 0x12
@@ -901,6 +929,7 @@
 	MS_SILENT                            = 0x8000
 	MS_SLAVE                             = 0x80000
 	MS_STRICTATIME                       = 0x1000000
+	MS_SUBMOUNT                          = 0x4000000
 	MS_SYNC                              = 0x4
 	MS_SYNCHRONOUS                       = 0x10
 	MS_UNBINDABLE                        = 0x20000
@@ -915,6 +944,7 @@
 	NETLINK_DNRTMSG                      = 0xe
 	NETLINK_DROP_MEMBERSHIP              = 0x2
 	NETLINK_ECRYPTFS                     = 0x13
+	NETLINK_EXT_ACK                      = 0xb
 	NETLINK_FIB_LOOKUP                   = 0xa
 	NETLINK_FIREWALL                     = 0x3
 	NETLINK_GENERIC                      = 0x10
@@ -933,6 +963,7 @@
 	NETLINK_RX_RING                      = 0x6
 	NETLINK_SCSITRANSPORT                = 0x12
 	NETLINK_SELINUX                      = 0x7
+	NETLINK_SMC                          = 0x16
 	NETLINK_SOCK_DIAG                    = 0x4
 	NETLINK_TX_RING                      = 0x7
 	NETLINK_UNUSED                       = 0x1
@@ -953,8 +984,10 @@
 	NLMSG_NOOP                           = 0x1
 	NLMSG_OVERRUN                        = 0x4
 	NLM_F_ACK                            = 0x4
+	NLM_F_ACK_TLVS                       = 0x200
 	NLM_F_APPEND                         = 0x800
 	NLM_F_ATOMIC                         = 0x400
+	NLM_F_CAPPED                         = 0x100
 	NLM_F_CREATE                         = 0x400
 	NLM_F_DUMP                           = 0x300
 	NLM_F_DUMP_FILTERED                  = 0x20
@@ -1011,6 +1044,7 @@
 	PACKET_FANOUT_EBPF                   = 0x7
 	PACKET_FANOUT_FLAG_DEFRAG            = 0x8000
 	PACKET_FANOUT_FLAG_ROLLOVER          = 0x1000
+	PACKET_FANOUT_FLAG_UNIQUEID          = 0x2000
 	PACKET_FANOUT_HASH                   = 0x0
 	PACKET_FANOUT_LB                     = 0x1
 	PACKET_FANOUT_QM                     = 0x5
@@ -1152,7 +1186,7 @@
 	PR_SET_NO_NEW_PRIVS                  = 0x26
 	PR_SET_PDEATHSIG                     = 0x1
 	PR_SET_PTRACER                       = 0x59616d61
-	PR_SET_PTRACER_ANY                   = -0x1
+	PR_SET_PTRACER_ANY                   = 0xffffffffffffffff
 	PR_SET_SECCOMP                       = 0x16
 	PR_SET_SECUREBITS                    = 0x1c
 	PR_SET_THP_DISABLE                   = 0x29
@@ -1305,7 +1339,7 @@
 	RLIMIT_RTTIME                        = 0xf
 	RLIMIT_SIGPENDING                    = 0xb
 	RLIMIT_STACK                         = 0x3
-	RLIM_INFINITY                        = -0x1
+	RLIM_INFINITY                        = 0xffffffffffffffff
 	RTAX_ADVMSS                          = 0x8
 	RTAX_CC_ALGO                         = 0x10
 	RTAX_CWND                            = 0x7
@@ -1330,7 +1364,7 @@
 	RTAX_UNSPEC                          = 0x0
 	RTAX_WINDOW                          = 0x3
 	RTA_ALIGNTO                          = 0x4
-	RTA_MAX                              = 0x19
+	RTA_MAX                              = 0x1a
 	RTCF_DIRECTSRC                       = 0x4000000
 	RTCF_DOREDIRECT                      = 0x1000000
 	RTCF_LOG                             = 0x2000000
@@ -1374,6 +1408,7 @@
 	RTM_DELLINK                          = 0x11
 	RTM_DELMDB                           = 0x55
 	RTM_DELNEIGH                         = 0x1d
+	RTM_DELNETCONF                       = 0x51
 	RTM_DELNSID                          = 0x59
 	RTM_DELQDISC                         = 0x25
 	RTM_DELROUTE                         = 0x19
@@ -1382,6 +1417,7 @@
 	RTM_DELTFILTER                       = 0x2d
 	RTM_F_CLONED                         = 0x200
 	RTM_F_EQUALIZE                       = 0x400
+	RTM_F_FIB_MATCH                      = 0x2000
 	RTM_F_LOOKUP_TABLE                   = 0x1000
 	RTM_F_NOTIFY                         = 0x100
 	RTM_F_PREFIX                         = 0x800
@@ -1403,10 +1439,11 @@
 	RTM_GETSTATS                         = 0x5e
 	RTM_GETTCLASS                        = 0x2a
 	RTM_GETTFILTER                       = 0x2e
-	RTM_MAX                              = 0x5f
+	RTM_MAX                              = 0x63
 	RTM_NEWACTION                        = 0x30
 	RTM_NEWADDR                          = 0x14
 	RTM_NEWADDRLABEL                     = 0x48
+	RTM_NEWCACHEREPORT                   = 0x60
 	RTM_NEWLINK                          = 0x10
 	RTM_NEWMDB                           = 0x54
 	RTM_NEWNDUSEROPT                     = 0x44
@@ -1421,8 +1458,8 @@
 	RTM_NEWSTATS                         = 0x5c
 	RTM_NEWTCLASS                        = 0x28
 	RTM_NEWTFILTER                       = 0x2c
-	RTM_NR_FAMILIES                      = 0x14
-	RTM_NR_MSGTYPES                      = 0x50
+	RTM_NR_FAMILIES                      = 0x15
+	RTM_NR_MSGTYPES                      = 0x54
 	RTM_SETDCB                           = 0x4f
 	RTM_SETLINK                          = 0x13
 	RTM_SETNEIGHTBL                      = 0x43
@@ -1433,6 +1470,7 @@
 	RTNH_F_OFFLOAD                       = 0x8
 	RTNH_F_ONLINK                        = 0x4
 	RTNH_F_PERVASIVE                     = 0x2
+	RTNH_F_UNRESOLVED                    = 0x20
 	RTN_MAX                              = 0xb
 	RTPROT_BABEL                         = 0x2a
 	RTPROT_BIRD                          = 0xc
@@ -1463,6 +1501,7 @@
 	SCM_TIMESTAMP                        = 0x1d
 	SCM_TIMESTAMPING                     = 0x25
 	SCM_TIMESTAMPING_OPT_STATS           = 0x36
+	SCM_TIMESTAMPING_PKTINFO             = 0x3a
 	SCM_TIMESTAMPNS                      = 0x23
 	SCM_WIFI_STATUS                      = 0x29
 	SECCOMP_MODE_DISABLED                = 0x0
@@ -1601,6 +1640,7 @@
 	SO_BSDCOMPAT                         = 0xe
 	SO_BUSY_POLL                         = 0x2e
 	SO_CNX_ADVICE                        = 0x35
+	SO_COOKIE                            = 0x39
 	SO_DEBUG                             = 0x1
 	SO_DETACH_BPF                        = 0x1b
 	SO_DETACH_FILTER                     = 0x1b
@@ -1609,11 +1649,13 @@
 	SO_ERROR                             = 0x4
 	SO_GET_FILTER                        = 0x1a
 	SO_INCOMING_CPU                      = 0x31
+	SO_INCOMING_NAPI_ID                  = 0x38
 	SO_KEEPALIVE                         = 0x9
 	SO_LINGER                            = 0xd
 	SO_LOCK_FILTER                       = 0x2c
 	SO_MARK                              = 0x24
 	SO_MAX_PACING_RATE                   = 0x2f
+	SO_MEMINFO                           = 0x37
 	SO_NOFCS                             = 0x2b
 	SO_NO_CHECK                          = 0xb
 	SO_OOBINLINE                         = 0xa
@@ -1621,6 +1663,7 @@
 	SO_PASSSEC                           = 0x22
 	SO_PEEK_OFF                          = 0x2a
 	SO_PEERCRED                          = 0x11
+	SO_PEERGROUPS                        = 0x3b
 	SO_PEERNAME                          = 0x1c
 	SO_PEERSEC                           = 0x1f
 	SO_PRIORITY                          = 0xc
@@ -1688,6 +1731,12 @@
 	TAB2                                 = 0x1000
 	TAB3                                 = 0x1800
 	TABDLY                               = 0x1800
+	TASKSTATS_CMD_ATTR_MAX               = 0x4
+	TASKSTATS_CMD_MAX                    = 0x2
+	TASKSTATS_GENL_NAME                  = "TASKSTATS"
+	TASKSTATS_GENL_VERSION               = 0x1
+	TASKSTATS_TYPE_MAX                   = 0x6
+	TASKSTATS_VERSION                    = 0x8
 	TCFLSH                               = 0x540b
 	TCGETA                               = 0x5405
 	TCGETS                               = 0x5401
@@ -1711,6 +1760,7 @@
 	TCP_CORK                             = 0x3
 	TCP_DEFER_ACCEPT                     = 0x9
 	TCP_FASTOPEN                         = 0x17
+	TCP_FASTOPEN_CONNECT                 = 0x1e
 	TCP_INFO                             = 0xb
 	TCP_KEEPCNT                          = 0x6
 	TCP_KEEPIDLE                         = 0x4
@@ -1770,6 +1820,7 @@
 	TIOCGPKT                             = 0x80045438
 	TIOCGPTLCK                           = 0x80045439
 	TIOCGPTN                             = 0x80045430
+	TIOCGPTPEER                          = 0x5441
 	TIOCGRS485                           = 0x542e
 	TIOCGSERIAL                          = 0x541e
 	TIOCGSID                             = 0x5429
@@ -1827,6 +1878,7 @@
 	TIOCSWINSZ                           = 0x5414
 	TIOCVHANGUP                          = 0x5437
 	TOSTOP                               = 0x100
+	TS_COMM_LEN                          = 0x20
 	TUNATTACHFILTER                      = 0x401054d5
 	TUNDETACHFILTER                      = 0x401054d6
 	TUNGETFEATURES                       = 0x800454cf
@@ -1852,6 +1904,8 @@
 	TUNSETVNETHDRSZ                      = 0x400454d8
 	TUNSETVNETLE                         = 0x400454dc
 	UMOUNT_NOFOLLOW                      = 0x8
+	UTIME_NOW                            = 0x3fffffff
+	UTIME_OMIT                           = 0x3ffffffe
 	VDISCARD                             = 0xd
 	VEOF                                 = 0x4
 	VEOL                                 = 0xb
@@ -1881,6 +1935,17 @@
 	WALL                                 = 0x40000000
 	WCLONE                               = 0x80000000
 	WCONTINUED                           = 0x8
+	WDIOC_GETBOOTSTATUS                  = 0x80045702
+	WDIOC_GETPRETIMEOUT                  = 0x80045709
+	WDIOC_GETSTATUS                      = 0x80045701
+	WDIOC_GETSUPPORT                     = 0x80285700
+	WDIOC_GETTEMP                        = 0x80045703
+	WDIOC_GETTIMELEFT                    = 0x8004570a
+	WDIOC_GETTIMEOUT                     = 0x80045707
+	WDIOC_KEEPALIVE                      = 0x80045705
+	WDIOC_SETOPTIONS                     = 0x80045704
+	WDIOC_SETPRETIMEOUT                  = 0xc0045708
+	WDIOC_SETTIMEOUT                     = 0xc0045706
 	WEXITED                              = 0x4
 	WNOHANG                              = 0x1
 	WNOTHREAD                            = 0x20000000
@@ -2061,7 +2126,6 @@
 	SIGTSTP   = syscall.Signal(0x14)
 	SIGTTIN   = syscall.Signal(0x15)
 	SIGTTOU   = syscall.Signal(0x16)
-	SIGUNUSED = syscall.Signal(0x1f)
 	SIGURG    = syscall.Signal(0x17)
 	SIGUSR1   = syscall.Signal(0xa)
 	SIGUSR2   = syscall.Signal(0xc)
diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
index 81e83d7..09eedb0 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go
@@ -664,6 +664,8 @@
 	MS_OLDSYNC                    = 0x0
 	MS_SYNC                       = 0x4
 	M_FLUSH                       = 0x86
+	NAME_MAX                      = 0xff
+	NEWDEV                        = 0x1
 	NL0                           = 0x0
 	NL1                           = 0x100
 	NLDLY                         = 0x100
@@ -672,6 +674,9 @@
 	OFDEL                         = 0x80
 	OFILL                         = 0x40
 	OLCUC                         = 0x2
+	OLDDEV                        = 0x0
+	ONBITSMAJOR                   = 0x7
+	ONBITSMINOR                   = 0x8
 	ONLCR                         = 0x4
 	ONLRET                        = 0x20
 	ONOCR                         = 0x10
@@ -1105,6 +1110,7 @@
 	VEOL                          = 0x5
 	VEOL2                         = 0x6
 	VERASE                        = 0x2
+	VERASE2                       = 0x11
 	VINTR                         = 0x0
 	VKILL                         = 0x3
 	VLNEXT                        = 0xf
diff --git a/vendor/golang.org/x/sys/unix/zptrace386_linux.go b/vendor/golang.org/x/sys/unix/zptrace386_linux.go
new file mode 100644
index 0000000..2d21c49
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zptrace386_linux.go
@@ -0,0 +1,80 @@
+// Code generated by linux/mkall.go generatePtracePair(386, amd64). DO NOT EDIT.
+
+// +build linux
+// +build 386 amd64
+
+package unix
+
+import "unsafe"
+
+// PtraceRegs386 is the registers used by 386 binaries.
+type PtraceRegs386 struct {
+	Ebx      int32
+	Ecx      int32
+	Edx      int32
+	Esi      int32
+	Edi      int32
+	Ebp      int32
+	Eax      int32
+	Xds      int32
+	Xes      int32
+	Xfs      int32
+	Xgs      int32
+	Orig_eax int32
+	Eip      int32
+	Xcs      int32
+	Eflags   int32
+	Esp      int32
+	Xss      int32
+}
+
+// PtraceGetRegs386 fetches the registers used by 386 binaries.
+func PtraceGetRegs386(pid int, regsout *PtraceRegs386) error {
+	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+}
+
+// PtraceSetRegs386 sets the registers used by 386 binaries.
+func PtraceSetRegs386(pid int, regs *PtraceRegs386) error {
+	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+}
+
+// PtraceRegsAmd64 is the registers used by amd64 binaries.
+type PtraceRegsAmd64 struct {
+	R15      uint64
+	R14      uint64
+	R13      uint64
+	R12      uint64
+	Rbp      uint64
+	Rbx      uint64
+	R11      uint64
+	R10      uint64
+	R9       uint64
+	R8       uint64
+	Rax      uint64
+	Rcx      uint64
+	Rdx      uint64
+	Rsi      uint64
+	Rdi      uint64
+	Orig_rax uint64
+	Rip      uint64
+	Cs       uint64
+	Eflags   uint64
+	Rsp      uint64
+	Ss       uint64
+	Fs_base  uint64
+	Gs_base  uint64
+	Ds       uint64
+	Es       uint64
+	Fs       uint64
+	Gs       uint64
+}
+
+// PtraceGetRegsAmd64 fetches the registers used by amd64 binaries.
+func PtraceGetRegsAmd64(pid int, regsout *PtraceRegsAmd64) error {
+	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+}
+
+// PtraceSetRegsAmd64 sets the registers used by amd64 binaries.
+func PtraceSetRegsAmd64(pid int, regs *PtraceRegsAmd64) error {
+	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+}
diff --git a/vendor/golang.org/x/sys/unix/zptracearm_linux.go b/vendor/golang.org/x/sys/unix/zptracearm_linux.go
new file mode 100644
index 0000000..faf23bb
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zptracearm_linux.go
@@ -0,0 +1,41 @@
+// Code generated by linux/mkall.go generatePtracePair(arm, arm64). DO NOT EDIT.
+
+// +build linux
+// +build arm arm64
+
+package unix
+
+import "unsafe"
+
+// PtraceRegsArm is the registers used by arm binaries.
+type PtraceRegsArm struct {
+	Uregs [18]uint32
+}
+
+// PtraceGetRegsArm fetches the registers used by arm binaries.
+func PtraceGetRegsArm(pid int, regsout *PtraceRegsArm) error {
+	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+}
+
+// PtraceSetRegsArm sets the registers used by arm binaries.
+func PtraceSetRegsArm(pid int, regs *PtraceRegsArm) error {
+	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+}
+
+// PtraceRegsArm64 is the registers used by arm64 binaries.
+type PtraceRegsArm64 struct {
+	Regs   [31]uint64
+	Sp     uint64
+	Pc     uint64
+	Pstate uint64
+}
+
+// PtraceGetRegsArm64 fetches the registers used by arm64 binaries.
+func PtraceGetRegsArm64(pid int, regsout *PtraceRegsArm64) error {
+	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+}
+
+// PtraceSetRegsArm64 sets the registers used by arm64 binaries.
+func PtraceSetRegsArm64(pid int, regs *PtraceRegsArm64) error {
+	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+}
diff --git a/vendor/golang.org/x/sys/unix/zptracemips_linux.go b/vendor/golang.org/x/sys/unix/zptracemips_linux.go
new file mode 100644
index 0000000..c431131
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zptracemips_linux.go
@@ -0,0 +1,50 @@
+// Code generated by linux/mkall.go generatePtracePair(mips, mips64). DO NOT EDIT.
+
+// +build linux
+// +build mips mips64
+
+package unix
+
+import "unsafe"
+
+// PtraceRegsMips is the registers used by mips binaries.
+type PtraceRegsMips struct {
+	Regs     [32]uint64
+	Lo       uint64
+	Hi       uint64
+	Epc      uint64
+	Badvaddr uint64
+	Status   uint64
+	Cause    uint64
+}
+
+// PtraceGetRegsMips fetches the registers used by mips binaries.
+func PtraceGetRegsMips(pid int, regsout *PtraceRegsMips) error {
+	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+}
+
+// PtraceSetRegsMips sets the registers used by mips binaries.
+func PtraceSetRegsMips(pid int, regs *PtraceRegsMips) error {
+	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+}
+
+// PtraceRegsMips64 is the registers used by mips64 binaries.
+type PtraceRegsMips64 struct {
+	Regs     [32]uint64
+	Lo       uint64
+	Hi       uint64
+	Epc      uint64
+	Badvaddr uint64
+	Status   uint64
+	Cause    uint64
+}
+
+// PtraceGetRegsMips64 fetches the registers used by mips64 binaries.
+func PtraceGetRegsMips64(pid int, regsout *PtraceRegsMips64) error {
+	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+}
+
+// PtraceSetRegsMips64 sets the registers used by mips64 binaries.
+func PtraceSetRegsMips64(pid int, regs *PtraceRegsMips64) error {
+	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+}
diff --git a/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go b/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go
new file mode 100644
index 0000000..dc3d6d3
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/zptracemipsle_linux.go
@@ -0,0 +1,50 @@
+// Code generated by linux/mkall.go generatePtracePair(mipsle, mips64le). DO NOT EDIT.
+
+// +build linux
+// +build mipsle mips64le
+
+package unix
+
+import "unsafe"
+
+// PtraceRegsMipsle is the registers used by mipsle binaries.
+type PtraceRegsMipsle struct {
+	Regs     [32]uint64
+	Lo       uint64
+	Hi       uint64
+	Epc      uint64
+	Badvaddr uint64
+	Status   uint64
+	Cause    uint64
+}
+
+// PtraceGetRegsMipsle fetches the registers used by mipsle binaries.
+func PtraceGetRegsMipsle(pid int, regsout *PtraceRegsMipsle) error {
+	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+}
+
+// PtraceSetRegsMipsle sets the registers used by mipsle binaries.
+func PtraceSetRegsMipsle(pid int, regs *PtraceRegsMipsle) error {
+	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+}
+
+// PtraceRegsMips64le is the registers used by mips64le binaries.
+type PtraceRegsMips64le struct {
+	Regs     [32]uint64
+	Lo       uint64
+	Hi       uint64
+	Epc      uint64
+	Badvaddr uint64
+	Status   uint64
+	Cause    uint64
+}
+
+// PtraceGetRegsMips64le fetches the registers used by mips64le binaries.
+func PtraceGetRegsMips64le(pid int, regsout *PtraceRegsMips64le) error {
+	return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout)))
+}
+
+// PtraceSetRegsMips64le sets the registers used by mips64le binaries.
+func PtraceSetRegsMips64le(pid int, regs *PtraceRegsMips64le) error {
+	return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs)))
+}
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
index 10491e9..9fb1b31 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go
@@ -408,6 +408,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 5f1f6bf..1e0fb46 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -408,6 +408,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
index 7a40974..e1026a8 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
@@ -408,6 +408,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 07c6ebc..37fb210 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -408,6 +408,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
index 7fa205c..7576147 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go
@@ -266,6 +266,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Madvise(b []byte, behav int) (err error) {
 	var _p0 unsafe.Pointer
 	if len(b) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
index 1a0bb4c..8bcecfb 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go
@@ -266,6 +266,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Madvise(b []byte, behav int) (err error) {
 	var _p0 unsafe.Pointer
 	if len(b) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
index ac1e8e0..61c0cf9 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go
@@ -266,6 +266,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Madvise(b []byte, behav int) (err error) {
 	var _p0 unsafe.Pointer
 	if len(b) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
index 2b4e6ac..ffd0107 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go
@@ -266,6 +266,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Madvise(b []byte, behav int) (err error) {
 	var _p0 unsafe.Pointer
 	if len(b) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
index 38c1bbd..85a2907 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go
@@ -1035,6 +1035,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1446,22 +1457,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mlockall(flags int) (err error) {
 	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
 	if e1 != 0 {
@@ -1488,6 +1483,22 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Munlockall() (err error) {
 	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
index dc8fe0a..8e2be97 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go
@@ -1035,6 +1035,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1446,22 +1457,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mlockall(flags int) (err error) {
 	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
 	if e1 != 0 {
@@ -1488,6 +1483,22 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Munlockall() (err error) {
 	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
index 4d28042..5ff0637 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go
@@ -1035,6 +1035,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1446,22 +1457,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mlockall(flags int) (err error) {
 	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
 	if e1 != 0 {
@@ -1488,6 +1483,22 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Munlockall() (err error) {
 	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
index 20ad4b6..4076011 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go
@@ -1035,6 +1035,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1446,22 +1457,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mlockall(flags int) (err error) {
 	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
 	if e1 != 0 {
@@ -1488,6 +1483,22 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Munlockall() (err error) {
 	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
 	if e1 != 0 {
@@ -1667,17 +1678,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
-	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
 	r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
 	written = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
index 9f194dc..984e561 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go
@@ -1035,6 +1035,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1446,22 +1457,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mlockall(flags int) (err error) {
 	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
 	if e1 != 0 {
@@ -1488,6 +1483,22 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Munlockall() (err error) {
 	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
index 4fde3ef..f98194e 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go
@@ -1035,6 +1035,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1446,22 +1457,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mlockall(flags int) (err error) {
 	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
 	if e1 != 0 {
@@ -1488,6 +1483,22 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Munlockall() (err error) {
 	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
 	if e1 != 0 {
@@ -1677,17 +1688,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
-	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
 	r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
 	written = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
index f646342..f302670 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go
@@ -1035,6 +1035,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1446,22 +1457,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mlockall(flags int) (err error) {
 	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
 	if e1 != 0 {
@@ -1488,6 +1483,22 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Munlockall() (err error) {
 	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
 	if e1 != 0 {
@@ -1677,17 +1688,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
-	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
-	n = int(r0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
 	r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0)
 	written = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
index 964591e..f18c5e4 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go
@@ -1035,6 +1035,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1446,22 +1457,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mlockall(flags int) (err error) {
 	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
 	if e1 != 0 {
@@ -1488,6 +1483,22 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Munlockall() (err error) {
 	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
index 204ab1a..bc26824 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go
@@ -1035,6 +1035,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1446,22 +1457,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mlockall(flags int) (err error) {
 	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
 	if e1 != 0 {
@@ -1488,6 +1483,22 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Munlockall() (err error) {
 	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
 	if e1 != 0 {
@@ -1734,7 +1745,7 @@
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
-	r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+	r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
index a8a2b0b..8d874cb 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go
@@ -1035,6 +1035,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1446,22 +1457,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mlockall(flags int) (err error) {
 	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
 	if e1 != 0 {
@@ -1488,6 +1483,22 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Munlockall() (err error) {
 	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
 	if e1 != 0 {
@@ -1734,7 +1745,7 @@
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
 func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) {
-	r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
+	r0, _, e1 := Syscall6(SYS__NEWSELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0)
 	n = int(r0)
 	if e1 != 0 {
 		err = errnoErr(e1)
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
index b6ff9e3..1693212 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go
@@ -1035,6 +1035,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Pselect(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
+	r0, _, e1 := Syscall6(SYS_PSELECT6, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), uintptr(unsafe.Pointer(sigmask)))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func read(fd int, p []byte) (n int, err error) {
 	var _p0 unsafe.Pointer
 	if len(p) > 0 {
@@ -1446,22 +1457,6 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func Munlock(b []byte) (err error) {
-	var _p0 unsafe.Pointer
-	if len(b) > 0 {
-		_p0 = unsafe.Pointer(&b[0])
-	} else {
-		_p0 = unsafe.Pointer(&_zero)
-	}
-	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
-	if e1 != 0 {
-		err = errnoErr(e1)
-	}
-	return
-}
-
-// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
-
 func Mlockall(flags int) (err error) {
 	_, _, e1 := Syscall(SYS_MLOCKALL, uintptr(flags), 0, 0)
 	if e1 != 0 {
@@ -1488,6 +1483,22 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func Munlock(b []byte) (err error) {
+	var _p0 unsafe.Pointer
+	if len(b) > 0 {
+		_p0 = unsafe.Pointer(&b[0])
+	} else {
+		_p0 = unsafe.Pointer(&_zero)
+	}
+	_, _, e1 := Syscall(SYS_MUNLOCK, uintptr(_p0), uintptr(len(b)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Munlockall() (err error) {
 	_, _, e1 := Syscall(SYS_MUNLOCKALL, 0, 0, 0)
 	if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
index db99fd0..cfdea85 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go
@@ -266,6 +266,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Madvise(b []byte, behav int) (err error) {
 	var _p0 unsafe.Pointer
 	if len(b) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
index 7b6c2c8..244a3c7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go
@@ -266,6 +266,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Madvise(b []byte, behav int) (err error) {
 	var _p0 unsafe.Pointer
 	if len(b) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
index 0f4cc3b..e891adc 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go
@@ -266,6 +266,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Madvise(b []byte, behav int) (err error) {
 	var _p0 unsafe.Pointer
 	if len(b) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
index 7baea87..f48beb0 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go
@@ -266,6 +266,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Madvise(b []byte, behav int) (err error) {
 	var _p0 unsafe.Pointer
 	if len(b) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
index 0d69ce6..44a3faf 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go
@@ -266,6 +266,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Madvise(b []byte, behav int) (err error) {
 	var _p0 unsafe.Pointer
 	if len(b) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
index 41572c2..1563752 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go
@@ -266,6 +266,17 @@
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := Syscall(SYS_POLL, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout))
+	n = int(r0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func Madvise(b []byte, behav int) (err error) {
 	var _p0 unsafe.Pointer
 	if len(b) > 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index 4287133..1d45276 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -25,7 +25,11 @@
 //go:cgo_import_dynamic libc___xnet_recvmsg __xnet_recvmsg "libsocket.so"
 //go:cgo_import_dynamic libc___xnet_sendmsg __xnet_sendmsg "libsocket.so"
 //go:cgo_import_dynamic libc_acct acct "libc.so"
+//go:cgo_import_dynamic libc___makedev __makedev "libc.so"
+//go:cgo_import_dynamic libc___major __major "libc.so"
+//go:cgo_import_dynamic libc___minor __minor "libc.so"
 //go:cgo_import_dynamic libc_ioctl ioctl "libc.so"
+//go:cgo_import_dynamic libc_poll poll "libc.so"
 //go:cgo_import_dynamic libc_access access "libc.so"
 //go:cgo_import_dynamic libc_adjtime adjtime "libc.so"
 //go:cgo_import_dynamic libc_chdir chdir "libc.so"
@@ -75,6 +79,7 @@
 //go:cgo_import_dynamic libc_mlock mlock "libc.so"
 //go:cgo_import_dynamic libc_mlockall mlockall "libc.so"
 //go:cgo_import_dynamic libc_mprotect mprotect "libc.so"
+//go:cgo_import_dynamic libc_msync msync "libc.so"
 //go:cgo_import_dynamic libc_munlock munlock "libc.so"
 //go:cgo_import_dynamic libc_munlockall munlockall "libc.so"
 //go:cgo_import_dynamic libc_nanosleep nanosleep "libc.so"
@@ -129,7 +134,6 @@
 //go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so"
 //go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
 //go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so"
-//go:cgo_import_dynamic libc_sysconf sysconf "libc.so"
 
 //go:linkname procpipe libc_pipe
 //go:linkname procgetsockname libc_getsockname
@@ -146,7 +150,11 @@
 //go:linkname proc__xnet_recvmsg libc___xnet_recvmsg
 //go:linkname proc__xnet_sendmsg libc___xnet_sendmsg
 //go:linkname procacct libc_acct
+//go:linkname proc__makedev libc___makedev
+//go:linkname proc__major libc___major
+//go:linkname proc__minor libc___minor
 //go:linkname procioctl libc_ioctl
+//go:linkname procpoll libc_poll
 //go:linkname procAccess libc_access
 //go:linkname procAdjtime libc_adjtime
 //go:linkname procChdir libc_chdir
@@ -196,6 +204,7 @@
 //go:linkname procMlock libc_mlock
 //go:linkname procMlockall libc_mlockall
 //go:linkname procMprotect libc_mprotect
+//go:linkname procMsync libc_msync
 //go:linkname procMunlock libc_munlock
 //go:linkname procMunlockall libc_munlockall
 //go:linkname procNanosleep libc_nanosleep
@@ -250,7 +259,6 @@
 //go:linkname procgetpeername libc_getpeername
 //go:linkname procsetsockopt libc_setsockopt
 //go:linkname procrecvfrom libc_recvfrom
-//go:linkname procsysconf libc_sysconf
 
 var (
 	procpipe,
@@ -268,7 +276,11 @@
 	proc__xnet_recvmsg,
 	proc__xnet_sendmsg,
 	procacct,
+	proc__makedev,
+	proc__major,
+	proc__minor,
 	procioctl,
+	procpoll,
 	procAccess,
 	procAdjtime,
 	procChdir,
@@ -318,6 +330,7 @@
 	procMlock,
 	procMlockall,
 	procMprotect,
+	procMsync,
 	procMunlock,
 	procMunlockall,
 	procNanosleep,
@@ -371,8 +384,7 @@
 	proc__xnet_getsockopt,
 	procgetpeername,
 	procsetsockopt,
-	procrecvfrom,
-	procsysconf syscallFunc
+	procrecvfrom syscallFunc
 )
 
 func pipe(p *[2]_C_int) (n int, err error) {
@@ -522,6 +534,24 @@
 	return
 }
 
+func __makedev(version int, major uint, minor uint) (val uint64) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__makedev)), 3, uintptr(version), uintptr(major), uintptr(minor), 0, 0, 0)
+	val = uint64(r0)
+	return
+}
+
+func __major(version int, dev uint64) (val uint) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__major)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0)
+	val = uint(r0)
+	return
+}
+
+func __minor(version int, dev uint64) (val uint) {
+	r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&proc__minor)), 2, uintptr(version), uintptr(dev), 0, 0, 0, 0)
+	val = uint(r0)
+	return
+}
+
 func ioctl(fd int, req uint, arg uintptr) (err error) {
 	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procioctl)), 3, uintptr(fd), uintptr(req), uintptr(arg), 0, 0, 0)
 	if e1 != 0 {
@@ -530,6 +560,15 @@
 	return
 }
 
+func poll(fds *PollFd, nfds int, timeout int) (n int, err error) {
+	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procpoll)), 3, uintptr(unsafe.Pointer(fds)), uintptr(nfds), uintptr(timeout), 0, 0, 0)
+	n = int(r0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Access(path string, mode uint32) (err error) {
 	var _p0 *byte
 	_p0, err = BytePtrFromString(path)
@@ -1020,6 +1059,18 @@
 	return
 }
 
+func Msync(b []byte, flags int) (err error) {
+	var _p0 *byte
+	if len(b) > 0 {
+		_p0 = &b[0]
+	}
+	_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procMsync)), 3, uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(flags), 0, 0, 0)
+	if e1 != 0 {
+		err = e1
+	}
+	return
+}
+
 func Munlock(b []byte) (err error) {
 	var _p0 *byte
 	if len(b) > 0 {
@@ -1589,12 +1640,3 @@
 	}
 	return
 }
-
-func sysconf(name int) (n int64, err error) {
-	r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsysconf)), 1, uintptr(name), 0, 0, 0, 0, 0)
-	n = int64(r0)
-	if e1 != 0 {
-		err = e1
-	}
-	return
-}
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
similarity index 100%
rename from vendor/golang.org/x/sys/unix/zsysctl_openbsd.go
rename to vendor/golang.org/x/sys/unix/zsysctl_openbsd_386.go
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
similarity index 100%
copy from vendor/golang.org/x/sys/unix/zsysctl_openbsd.go
copy to vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go
diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
similarity index 100%
copy from vendor/golang.org/x/sys/unix/zsysctl_openbsd.go
copy to vendor/golang.org/x/sys/unix/zsysctl_openbsd_arm.go
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index cef4fed..95ab129 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -385,4 +385,6 @@
 	SYS_PKEY_MPROTECT          = 380
 	SYS_PKEY_ALLOC             = 381
 	SYS_PKEY_FREE              = 382
+	SYS_STATX                  = 383
+	SYS_ARCH_PRCTL             = 384
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index 49bfa12..c5dabf2 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -338,4 +338,5 @@
 	SYS_PKEY_MPROTECT          = 329
 	SYS_PKEY_ALLOC             = 330
 	SYS_PKEY_FREE              = 331
+	SYS_STATX                  = 332
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 97b182e..ab7fa5f 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -358,4 +358,5 @@
 	SYS_PKEY_MPROTECT          = 394
 	SYS_PKEY_ALLOC             = 395
 	SYS_PKEY_FREE              = 396
+	SYS_STATX                  = 397
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 6407843..b1c6b4b 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -282,4 +282,5 @@
 	SYS_PKEY_MPROTECT          = 288
 	SYS_PKEY_ALLOC             = 289
 	SYS_PKEY_FREE              = 290
+	SYS_STATX                  = 291
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 939567c..2e9aa7a 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -371,4 +371,5 @@
 	SYS_PKEY_MPROTECT          = 4363
 	SYS_PKEY_ALLOC             = 4364
 	SYS_PKEY_FREE              = 4365
+	SYS_STATX                  = 4366
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 09db959..9282763 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -331,4 +331,5 @@
 	SYS_PKEY_MPROTECT          = 5323
 	SYS_PKEY_ALLOC             = 5324
 	SYS_PKEY_FREE              = 5325
+	SYS_STATX                  = 5326
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index d1b872a..45bd3fd 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -331,4 +331,5 @@
 	SYS_PKEY_MPROTECT          = 5323
 	SYS_PKEY_ALLOC             = 5324
 	SYS_PKEY_FREE              = 5325
+	SYS_STATX                  = 5326
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 82ba20f..62ccac4 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -371,4 +371,5 @@
 	SYS_PKEY_MPROTECT          = 4363
 	SYS_PKEY_ALLOC             = 4364
 	SYS_PKEY_FREE              = 4365
+	SYS_STATX                  = 4366
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index 8944448..dfe5dab 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -366,4 +366,5 @@
 	SYS_PREADV2                = 380
 	SYS_PWRITEV2               = 381
 	SYS_KEXEC_FILE_LOAD        = 382
+	SYS_STATX                  = 383
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index 90a039b..eca97f7 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -366,4 +366,5 @@
 	SYS_PREADV2                = 380
 	SYS_PWRITEV2               = 381
 	SYS_KEXEC_FILE_LOAD        = 382
+	SYS_STATX                  = 383
 )
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index aab0cdb..8ea18e6 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -306,6 +306,8 @@
 	SYS_COPY_FILE_RANGE        = 375
 	SYS_PREADV2                = 376
 	SYS_PWRITEV2               = 377
+	SYS_S390_GUARDED_STORAGE   = 378
+	SYS_STATX                  = 379
 	SYS_SELECT                 = 142
 	SYS_GETRLIMIT              = 191
 	SYS_LCHOWN                 = 198
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
index e61d78a..4667c7b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go
@@ -460,3 +460,22 @@
 	AT_SYMLINK_FOLLOW   = 0x40
 	AT_SYMLINK_NOFOLLOW = 0x20
 )
+
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR    = 0x8
+	POLLHUP    = 0x10
+	POLLIN     = 0x1
+	POLLNVAL   = 0x20
+	POLLOUT    = 0x4
+	POLLPRI    = 0x2
+	POLLRDBAND = 0x80
+	POLLRDNORM = 0x40
+	POLLWRBAND = 0x100
+	POLLWRNORM = 0x4
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
index 2619155..3f33b18 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go
@@ -470,3 +470,22 @@
 	AT_SYMLINK_FOLLOW   = 0x40
 	AT_SYMLINK_NOFOLLOW = 0x20
 )
+
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR    = 0x8
+	POLLHUP    = 0x10
+	POLLIN     = 0x1
+	POLLNVAL   = 0x20
+	POLLOUT    = 0x4
+	POLLPRI    = 0x2
+	POLLRDBAND = 0x80
+	POLLRDNORM = 0x40
+	POLLWRBAND = 0x100
+	POLLWRNORM = 0x4
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
index 4dca0d4..463a28b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go
@@ -461,3 +461,22 @@
 	AT_SYMLINK_FOLLOW   = 0x40
 	AT_SYMLINK_NOFOLLOW = 0x20
 )
+
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR    = 0x8
+	POLLHUP    = 0x10
+	POLLIN     = 0x1
+	POLLNVAL   = 0x20
+	POLLOUT    = 0x4
+	POLLPRI    = 0x2
+	POLLRDBAND = 0x80
+	POLLRDNORM = 0x40
+	POLLWRBAND = 0x100
+	POLLWRNORM = 0x4
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
index f2881fd..1ec20a0 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go
@@ -1,6 +1,7 @@
+// cgo -godefs types_darwin.go | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
+
 // +build arm64,darwin
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_darwin.go
 
 package unix
 
@@ -469,3 +470,22 @@
 	AT_SYMLINK_FOLLOW   = 0x40
 	AT_SYMLINK_NOFOLLOW = 0x20
 )
+
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR    = 0x8
+	POLLHUP    = 0x10
+	POLLIN     = 0x1
+	POLLNVAL   = 0x20
+	POLLOUT    = 0x4
+	POLLPRI    = 0x2
+	POLLRDBAND = 0x80
+	POLLRDNORM = 0x40
+	POLLWRBAND = 0x100
+	POLLWRNORM = 0x4
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
index 67c6bf8..ab515c3 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go
@@ -446,3 +446,22 @@
 	AT_FDCWD            = 0xfffafdcd
 	AT_SYMLINK_NOFOLLOW = 0x1
 )
+
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR    = 0x8
+	POLLHUP    = 0x10
+	POLLIN     = 0x1
+	POLLNVAL   = 0x20
+	POLLOUT    = 0x4
+	POLLPRI    = 0x2
+	POLLRDBAND = 0x80
+	POLLRDNORM = 0x40
+	POLLWRBAND = 0x100
+	POLLWRNORM = 0x4
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
index 5b28bcb..18f7816 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go
@@ -516,6 +516,26 @@
 	AT_SYMLINK_NOFOLLOW = 0x200
 )
 
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR      = 0x8
+	POLLHUP      = 0x10
+	POLLIN       = 0x1
+	POLLINIGNEOF = 0x2000
+	POLLNVAL     = 0x20
+	POLLOUT      = 0x4
+	POLLPRI      = 0x2
+	POLLRDBAND   = 0x80
+	POLLRDNORM   = 0x40
+	POLLWRBAND   = 0x100
+	POLLWRNORM   = 0x4
+)
+
 type CapRights struct {
 	Rights [2]uint64
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
index c65d89e..dd0db2a 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go
@@ -519,6 +519,26 @@
 	AT_SYMLINK_NOFOLLOW = 0x200
 )
 
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR      = 0x8
+	POLLHUP      = 0x10
+	POLLIN       = 0x1
+	POLLINIGNEOF = 0x2000
+	POLLNVAL     = 0x20
+	POLLOUT      = 0x4
+	POLLPRI      = 0x2
+	POLLRDBAND   = 0x80
+	POLLRDNORM   = 0x40
+	POLLWRBAND   = 0x100
+	POLLWRNORM   = 0x4
+)
+
 type CapRights struct {
 	Rights [2]uint64
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
index 42c0a50..473d3dc 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
@@ -519,6 +519,26 @@
 	AT_SYMLINK_NOFOLLOW = 0x200
 )
 
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR      = 0x8
+	POLLHUP      = 0x10
+	POLLIN       = 0x1
+	POLLINIGNEOF = 0x2000
+	POLLNVAL     = 0x20
+	POLLOUT      = 0x4
+	POLLPRI      = 0x2
+	POLLRDBAND   = 0x80
+	POLLRDNORM   = 0x40
+	POLLWRBAND   = 0x100
+	POLLWRNORM   = 0x4
+)
+
 type CapRights struct {
 	Rights [2]uint64
 }
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
index 0dcebb5..c6de942 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go
@@ -425,7 +425,7 @@
 	IFLA_LINKINFO       = 0x12
 	IFLA_NET_NS_PID     = 0x13
 	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2b
+	IFLA_MAX            = 0x2c
 	RT_SCOPE_UNIVERSE   = 0x0
 	RT_SCOPE_SITE       = 0xc8
 	RT_SCOPE_LINK       = 0xfd
@@ -621,12 +621,12 @@
 }
 
 type Utsname struct {
-	Sysname    [65]int8
-	Nodename   [65]int8
-	Release    [65]int8
-	Version    [65]int8
-	Machine    [65]int8
-	Domainname [65]int8
+	Sysname    [65]byte
+	Nodename   [65]byte
+	Release    [65]byte
+	Version    [65]byte
+	Machine    [65]byte
+	Domainname [65]byte
 }
 
 type Ustat_t struct {
@@ -673,8 +673,6 @@
 
 const PERF_IOC_FLAG_GROUP = 0x1
 
-const _SC_PAGESIZE = 0x1e
-
 type Termios struct {
 	Iflag  uint32
 	Oflag  uint32
@@ -692,3 +690,104 @@
 	Xpixel uint16
 	Ypixel uint16
 }
+
+type Taskstats struct {
+	Version                   uint16
+	Pad_cgo_0                 [2]byte
+	Ac_exitcode               uint32
+	Ac_flag                   uint8
+	Ac_nice                   uint8
+	Pad_cgo_1                 [6]byte
+	Cpu_count                 uint64
+	Cpu_delay_total           uint64
+	Blkio_count               uint64
+	Blkio_delay_total         uint64
+	Swapin_count              uint64
+	Swapin_delay_total        uint64
+	Cpu_run_real_total        uint64
+	Cpu_run_virtual_total     uint64
+	Ac_comm                   [32]int8
+	Ac_sched                  uint8
+	Ac_pad                    [3]uint8
+	Pad_cgo_2                 [4]byte
+	Ac_uid                    uint32
+	Ac_gid                    uint32
+	Ac_pid                    uint32
+	Ac_ppid                   uint32
+	Ac_btime                  uint32
+	Pad_cgo_3                 [4]byte
+	Ac_etime                  uint64
+	Ac_utime                  uint64
+	Ac_stime                  uint64
+	Ac_minflt                 uint64
+	Ac_majflt                 uint64
+	Coremem                   uint64
+	Virtmem                   uint64
+	Hiwater_rss               uint64
+	Hiwater_vm                uint64
+	Read_char                 uint64
+	Write_char                uint64
+	Read_syscalls             uint64
+	Write_syscalls            uint64
+	Read_bytes                uint64
+	Write_bytes               uint64
+	Cancelled_write_bytes     uint64
+	Nvcsw                     uint64
+	Nivcsw                    uint64
+	Ac_utimescaled            uint64
+	Ac_stimescaled            uint64
+	Cpu_scaled_run_real_total uint64
+	Freepages_count           uint64
+	Freepages_delay_total     uint64
+}
+
+const (
+	TASKSTATS_CMD_UNSPEC                  = 0x0
+	TASKSTATS_CMD_GET                     = 0x1
+	TASKSTATS_CMD_NEW                     = 0x2
+	TASKSTATS_TYPE_UNSPEC                 = 0x0
+	TASKSTATS_TYPE_PID                    = 0x1
+	TASKSTATS_TYPE_TGID                   = 0x2
+	TASKSTATS_TYPE_STATS                  = 0x3
+	TASKSTATS_TYPE_AGGR_PID               = 0x4
+	TASKSTATS_TYPE_AGGR_TGID              = 0x5
+	TASKSTATS_TYPE_NULL                   = 0x6
+	TASKSTATS_CMD_ATTR_UNSPEC             = 0x0
+	TASKSTATS_CMD_ATTR_PID                = 0x1
+	TASKSTATS_CMD_ATTR_TGID               = 0x2
+	TASKSTATS_CMD_ATTR_REGISTER_CPUMASK   = 0x3
+	TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4
+)
+
+type Genlmsghdr struct {
+	Cmd      uint8
+	Version  uint8
+	Reserved uint16
+}
+
+const (
+	CTRL_CMD_UNSPEC            = 0x0
+	CTRL_CMD_NEWFAMILY         = 0x1
+	CTRL_CMD_DELFAMILY         = 0x2
+	CTRL_CMD_GETFAMILY         = 0x3
+	CTRL_CMD_NEWOPS            = 0x4
+	CTRL_CMD_DELOPS            = 0x5
+	CTRL_CMD_GETOPS            = 0x6
+	CTRL_CMD_NEWMCAST_GRP      = 0x7
+	CTRL_CMD_DELMCAST_GRP      = 0x8
+	CTRL_CMD_GETMCAST_GRP      = 0x9
+	CTRL_ATTR_UNSPEC           = 0x0
+	CTRL_ATTR_FAMILY_ID        = 0x1
+	CTRL_ATTR_FAMILY_NAME      = 0x2
+	CTRL_ATTR_VERSION          = 0x3
+	CTRL_ATTR_HDRSIZE          = 0x4
+	CTRL_ATTR_MAXATTR          = 0x5
+	CTRL_ATTR_OPS              = 0x6
+	CTRL_ATTR_MCAST_GROUPS     = 0x7
+	CTRL_ATTR_OP_UNSPEC        = 0x0
+	CTRL_ATTR_OP_ID            = 0x1
+	CTRL_ATTR_OP_FLAGS         = 0x2
+	CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
+	CTRL_ATTR_MCAST_GRP_NAME   = 0x1
+	CTRL_ATTR_MCAST_GRP_ID     = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
index d70e543..4ea42df 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go
@@ -429,7 +429,7 @@
 	IFLA_LINKINFO       = 0x12
 	IFLA_NET_NS_PID     = 0x13
 	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2b
+	IFLA_MAX            = 0x2c
 	RT_SCOPE_UNIVERSE   = 0x0
 	RT_SCOPE_SITE       = 0xc8
 	RT_SCOPE_LINK       = 0xfd
@@ -637,12 +637,12 @@
 }
 
 type Utsname struct {
-	Sysname    [65]int8
-	Nodename   [65]int8
-	Release    [65]int8
-	Version    [65]int8
-	Machine    [65]int8
-	Domainname [65]int8
+	Sysname    [65]byte
+	Nodename   [65]byte
+	Release    [65]byte
+	Version    [65]byte
+	Machine    [65]byte
+	Domainname [65]byte
 }
 
 type Ustat_t struct {
@@ -691,8 +691,6 @@
 
 const PERF_IOC_FLAG_GROUP = 0x1
 
-const _SC_PAGESIZE = 0x1e
-
 type Termios struct {
 	Iflag  uint32
 	Oflag  uint32
@@ -710,3 +708,104 @@
 	Xpixel uint16
 	Ypixel uint16
 }
+
+type Taskstats struct {
+	Version                   uint16
+	Pad_cgo_0                 [2]byte
+	Ac_exitcode               uint32
+	Ac_flag                   uint8
+	Ac_nice                   uint8
+	Pad_cgo_1                 [6]byte
+	Cpu_count                 uint64
+	Cpu_delay_total           uint64
+	Blkio_count               uint64
+	Blkio_delay_total         uint64
+	Swapin_count              uint64
+	Swapin_delay_total        uint64
+	Cpu_run_real_total        uint64
+	Cpu_run_virtual_total     uint64
+	Ac_comm                   [32]int8
+	Ac_sched                  uint8
+	Ac_pad                    [3]uint8
+	Pad_cgo_2                 [4]byte
+	Ac_uid                    uint32
+	Ac_gid                    uint32
+	Ac_pid                    uint32
+	Ac_ppid                   uint32
+	Ac_btime                  uint32
+	Pad_cgo_3                 [4]byte
+	Ac_etime                  uint64
+	Ac_utime                  uint64
+	Ac_stime                  uint64
+	Ac_minflt                 uint64
+	Ac_majflt                 uint64
+	Coremem                   uint64
+	Virtmem                   uint64
+	Hiwater_rss               uint64
+	Hiwater_vm                uint64
+	Read_char                 uint64
+	Write_char                uint64
+	Read_syscalls             uint64
+	Write_syscalls            uint64
+	Read_bytes                uint64
+	Write_bytes               uint64
+	Cancelled_write_bytes     uint64
+	Nvcsw                     uint64
+	Nivcsw                    uint64
+	Ac_utimescaled            uint64
+	Ac_stimescaled            uint64
+	Cpu_scaled_run_real_total uint64
+	Freepages_count           uint64
+	Freepages_delay_total     uint64
+}
+
+const (
+	TASKSTATS_CMD_UNSPEC                  = 0x0
+	TASKSTATS_CMD_GET                     = 0x1
+	TASKSTATS_CMD_NEW                     = 0x2
+	TASKSTATS_TYPE_UNSPEC                 = 0x0
+	TASKSTATS_TYPE_PID                    = 0x1
+	TASKSTATS_TYPE_TGID                   = 0x2
+	TASKSTATS_TYPE_STATS                  = 0x3
+	TASKSTATS_TYPE_AGGR_PID               = 0x4
+	TASKSTATS_TYPE_AGGR_TGID              = 0x5
+	TASKSTATS_TYPE_NULL                   = 0x6
+	TASKSTATS_CMD_ATTR_UNSPEC             = 0x0
+	TASKSTATS_CMD_ATTR_PID                = 0x1
+	TASKSTATS_CMD_ATTR_TGID               = 0x2
+	TASKSTATS_CMD_ATTR_REGISTER_CPUMASK   = 0x3
+	TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4
+)
+
+type Genlmsghdr struct {
+	Cmd      uint8
+	Version  uint8
+	Reserved uint16
+}
+
+const (
+	CTRL_CMD_UNSPEC            = 0x0
+	CTRL_CMD_NEWFAMILY         = 0x1
+	CTRL_CMD_DELFAMILY         = 0x2
+	CTRL_CMD_GETFAMILY         = 0x3
+	CTRL_CMD_NEWOPS            = 0x4
+	CTRL_CMD_DELOPS            = 0x5
+	CTRL_CMD_GETOPS            = 0x6
+	CTRL_CMD_NEWMCAST_GRP      = 0x7
+	CTRL_CMD_DELMCAST_GRP      = 0x8
+	CTRL_CMD_GETMCAST_GRP      = 0x9
+	CTRL_ATTR_UNSPEC           = 0x0
+	CTRL_ATTR_FAMILY_ID        = 0x1
+	CTRL_ATTR_FAMILY_NAME      = 0x2
+	CTRL_ATTR_VERSION          = 0x3
+	CTRL_ATTR_HDRSIZE          = 0x4
+	CTRL_ATTR_MAXATTR          = 0x5
+	CTRL_ATTR_OPS              = 0x6
+	CTRL_ATTR_MCAST_GROUPS     = 0x7
+	CTRL_ATTR_OP_UNSPEC        = 0x0
+	CTRL_ATTR_OP_ID            = 0x1
+	CTRL_ATTR_OP_FLAGS         = 0x2
+	CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
+	CTRL_ATTR_MCAST_GRP_NAME   = 0x1
+	CTRL_ATTR_MCAST_GRP_ID     = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
index 497f563..f86d683 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go
@@ -429,7 +429,7 @@
 	IFLA_LINKINFO       = 0x12
 	IFLA_NET_NS_PID     = 0x13
 	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2b
+	IFLA_MAX            = 0x2c
 	RT_SCOPE_UNIVERSE   = 0x0
 	RT_SCOPE_SITE       = 0xc8
 	RT_SCOPE_LINK       = 0xfd
@@ -609,12 +609,12 @@
 }
 
 type Utsname struct {
-	Sysname    [65]uint8
-	Nodename   [65]uint8
-	Release    [65]uint8
-	Version    [65]uint8
-	Machine    [65]uint8
-	Domainname [65]uint8
+	Sysname    [65]byte
+	Nodename   [65]byte
+	Release    [65]byte
+	Version    [65]byte
+	Machine    [65]byte
+	Domainname [65]byte
 }
 
 type Ustat_t struct {
@@ -662,8 +662,6 @@
 
 const PERF_IOC_FLAG_GROUP = 0x1
 
-const _SC_PAGESIZE = 0x1e
-
 type Termios struct {
 	Iflag  uint32
 	Oflag  uint32
@@ -681,3 +679,104 @@
 	Xpixel uint16
 	Ypixel uint16
 }
+
+type Taskstats struct {
+	Version                   uint16
+	Pad_cgo_0                 [2]byte
+	Ac_exitcode               uint32
+	Ac_flag                   uint8
+	Ac_nice                   uint8
+	Pad_cgo_1                 [6]byte
+	Cpu_count                 uint64
+	Cpu_delay_total           uint64
+	Blkio_count               uint64
+	Blkio_delay_total         uint64
+	Swapin_count              uint64
+	Swapin_delay_total        uint64
+	Cpu_run_real_total        uint64
+	Cpu_run_virtual_total     uint64
+	Ac_comm                   [32]uint8
+	Ac_sched                  uint8
+	Ac_pad                    [3]uint8
+	Pad_cgo_2                 [4]byte
+	Ac_uid                    uint32
+	Ac_gid                    uint32
+	Ac_pid                    uint32
+	Ac_ppid                   uint32
+	Ac_btime                  uint32
+	Pad_cgo_3                 [4]byte
+	Ac_etime                  uint64
+	Ac_utime                  uint64
+	Ac_stime                  uint64
+	Ac_minflt                 uint64
+	Ac_majflt                 uint64
+	Coremem                   uint64
+	Virtmem                   uint64
+	Hiwater_rss               uint64
+	Hiwater_vm                uint64
+	Read_char                 uint64
+	Write_char                uint64
+	Read_syscalls             uint64
+	Write_syscalls            uint64
+	Read_bytes                uint64
+	Write_bytes               uint64
+	Cancelled_write_bytes     uint64
+	Nvcsw                     uint64
+	Nivcsw                    uint64
+	Ac_utimescaled            uint64
+	Ac_stimescaled            uint64
+	Cpu_scaled_run_real_total uint64
+	Freepages_count           uint64
+	Freepages_delay_total     uint64
+}
+
+const (
+	TASKSTATS_CMD_UNSPEC                  = 0x0
+	TASKSTATS_CMD_GET                     = 0x1
+	TASKSTATS_CMD_NEW                     = 0x2
+	TASKSTATS_TYPE_UNSPEC                 = 0x0
+	TASKSTATS_TYPE_PID                    = 0x1
+	TASKSTATS_TYPE_TGID                   = 0x2
+	TASKSTATS_TYPE_STATS                  = 0x3
+	TASKSTATS_TYPE_AGGR_PID               = 0x4
+	TASKSTATS_TYPE_AGGR_TGID              = 0x5
+	TASKSTATS_TYPE_NULL                   = 0x6
+	TASKSTATS_CMD_ATTR_UNSPEC             = 0x0
+	TASKSTATS_CMD_ATTR_PID                = 0x1
+	TASKSTATS_CMD_ATTR_TGID               = 0x2
+	TASKSTATS_CMD_ATTR_REGISTER_CPUMASK   = 0x3
+	TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4
+)
+
+type Genlmsghdr struct {
+	Cmd      uint8
+	Version  uint8
+	Reserved uint16
+}
+
+const (
+	CTRL_CMD_UNSPEC            = 0x0
+	CTRL_CMD_NEWFAMILY         = 0x1
+	CTRL_CMD_DELFAMILY         = 0x2
+	CTRL_CMD_GETFAMILY         = 0x3
+	CTRL_CMD_NEWOPS            = 0x4
+	CTRL_CMD_DELOPS            = 0x5
+	CTRL_CMD_GETOPS            = 0x6
+	CTRL_CMD_NEWMCAST_GRP      = 0x7
+	CTRL_CMD_DELMCAST_GRP      = 0x8
+	CTRL_CMD_GETMCAST_GRP      = 0x9
+	CTRL_ATTR_UNSPEC           = 0x0
+	CTRL_ATTR_FAMILY_ID        = 0x1
+	CTRL_ATTR_FAMILY_NAME      = 0x2
+	CTRL_ATTR_VERSION          = 0x3
+	CTRL_ATTR_HDRSIZE          = 0x4
+	CTRL_ATTR_MAXATTR          = 0x5
+	CTRL_ATTR_OPS              = 0x6
+	CTRL_ATTR_MCAST_GROUPS     = 0x7
+	CTRL_ATTR_OP_UNSPEC        = 0x0
+	CTRL_ATTR_OP_ID            = 0x1
+	CTRL_ATTR_OP_FLAGS         = 0x2
+	CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
+	CTRL_ATTR_MCAST_GRP_NAME   = 0x1
+	CTRL_ATTR_MCAST_GRP_ID     = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
index f0bdaed..45c10b7 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go
@@ -430,7 +430,7 @@
 	IFLA_LINKINFO       = 0x12
 	IFLA_NET_NS_PID     = 0x13
 	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2b
+	IFLA_MAX            = 0x2c
 	RT_SCOPE_UNIVERSE   = 0x0
 	RT_SCOPE_SITE       = 0xc8
 	RT_SCOPE_LINK       = 0xfd
@@ -615,12 +615,12 @@
 }
 
 type Utsname struct {
-	Sysname    [65]int8
-	Nodename   [65]int8
-	Release    [65]int8
-	Version    [65]int8
-	Machine    [65]int8
-	Domainname [65]int8
+	Sysname    [65]byte
+	Nodename   [65]byte
+	Release    [65]byte
+	Version    [65]byte
+	Machine    [65]byte
+	Domainname [65]byte
 }
 
 type Ustat_t struct {
@@ -670,8 +670,6 @@
 
 const PERF_IOC_FLAG_GROUP = 0x1
 
-const _SC_PAGESIZE = 0x1e
-
 type Termios struct {
 	Iflag  uint32
 	Oflag  uint32
@@ -689,3 +687,104 @@
 	Xpixel uint16
 	Ypixel uint16
 }
+
+type Taskstats struct {
+	Version                   uint16
+	Pad_cgo_0                 [2]byte
+	Ac_exitcode               uint32
+	Ac_flag                   uint8
+	Ac_nice                   uint8
+	Pad_cgo_1                 [6]byte
+	Cpu_count                 uint64
+	Cpu_delay_total           uint64
+	Blkio_count               uint64
+	Blkio_delay_total         uint64
+	Swapin_count              uint64
+	Swapin_delay_total        uint64
+	Cpu_run_real_total        uint64
+	Cpu_run_virtual_total     uint64
+	Ac_comm                   [32]int8
+	Ac_sched                  uint8
+	Ac_pad                    [3]uint8
+	Pad_cgo_2                 [4]byte
+	Ac_uid                    uint32
+	Ac_gid                    uint32
+	Ac_pid                    uint32
+	Ac_ppid                   uint32
+	Ac_btime                  uint32
+	Pad_cgo_3                 [4]byte
+	Ac_etime                  uint64
+	Ac_utime                  uint64
+	Ac_stime                  uint64
+	Ac_minflt                 uint64
+	Ac_majflt                 uint64
+	Coremem                   uint64
+	Virtmem                   uint64
+	Hiwater_rss               uint64
+	Hiwater_vm                uint64
+	Read_char                 uint64
+	Write_char                uint64
+	Read_syscalls             uint64
+	Write_syscalls            uint64
+	Read_bytes                uint64
+	Write_bytes               uint64
+	Cancelled_write_bytes     uint64
+	Nvcsw                     uint64
+	Nivcsw                    uint64
+	Ac_utimescaled            uint64
+	Ac_stimescaled            uint64
+	Cpu_scaled_run_real_total uint64
+	Freepages_count           uint64
+	Freepages_delay_total     uint64
+}
+
+const (
+	TASKSTATS_CMD_UNSPEC                  = 0x0
+	TASKSTATS_CMD_GET                     = 0x1
+	TASKSTATS_CMD_NEW                     = 0x2
+	TASKSTATS_TYPE_UNSPEC                 = 0x0
+	TASKSTATS_TYPE_PID                    = 0x1
+	TASKSTATS_TYPE_TGID                   = 0x2
+	TASKSTATS_TYPE_STATS                  = 0x3
+	TASKSTATS_TYPE_AGGR_PID               = 0x4
+	TASKSTATS_TYPE_AGGR_TGID              = 0x5
+	TASKSTATS_TYPE_NULL                   = 0x6
+	TASKSTATS_CMD_ATTR_UNSPEC             = 0x0
+	TASKSTATS_CMD_ATTR_PID                = 0x1
+	TASKSTATS_CMD_ATTR_TGID               = 0x2
+	TASKSTATS_CMD_ATTR_REGISTER_CPUMASK   = 0x3
+	TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4
+)
+
+type Genlmsghdr struct {
+	Cmd      uint8
+	Version  uint8
+	Reserved uint16
+}
+
+const (
+	CTRL_CMD_UNSPEC            = 0x0
+	CTRL_CMD_NEWFAMILY         = 0x1
+	CTRL_CMD_DELFAMILY         = 0x2
+	CTRL_CMD_GETFAMILY         = 0x3
+	CTRL_CMD_NEWOPS            = 0x4
+	CTRL_CMD_DELOPS            = 0x5
+	CTRL_CMD_GETOPS            = 0x6
+	CTRL_CMD_NEWMCAST_GRP      = 0x7
+	CTRL_CMD_DELMCAST_GRP      = 0x8
+	CTRL_CMD_GETMCAST_GRP      = 0x9
+	CTRL_ATTR_UNSPEC           = 0x0
+	CTRL_ATTR_FAMILY_ID        = 0x1
+	CTRL_ATTR_FAMILY_NAME      = 0x2
+	CTRL_ATTR_VERSION          = 0x3
+	CTRL_ATTR_HDRSIZE          = 0x4
+	CTRL_ATTR_MAXATTR          = 0x5
+	CTRL_ATTR_OPS              = 0x6
+	CTRL_ATTR_MCAST_GROUPS     = 0x7
+	CTRL_ATTR_OP_UNSPEC        = 0x0
+	CTRL_ATTR_OP_ID            = 0x1
+	CTRL_ATTR_OP_FLAGS         = 0x2
+	CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
+	CTRL_ATTR_MCAST_GRP_NAME   = 0x1
+	CTRL_ATTR_MCAST_GRP_ID     = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
index 850a68c..4cc0a1c 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go
@@ -428,7 +428,7 @@
 	IFLA_LINKINFO       = 0x12
 	IFLA_NET_NS_PID     = 0x13
 	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2b
+	IFLA_MAX            = 0x2c
 	RT_SCOPE_UNIVERSE   = 0x0
 	RT_SCOPE_SITE       = 0xc8
 	RT_SCOPE_LINK       = 0xfd
@@ -614,12 +614,12 @@
 }
 
 type Utsname struct {
-	Sysname    [65]int8
-	Nodename   [65]int8
-	Release    [65]int8
-	Version    [65]int8
-	Machine    [65]int8
-	Domainname [65]int8
+	Sysname    [65]byte
+	Nodename   [65]byte
+	Release    [65]byte
+	Version    [65]byte
+	Machine    [65]byte
+	Domainname [65]byte
 }
 
 type Ustat_t struct {
@@ -667,8 +667,6 @@
 
 const PERF_IOC_FLAG_GROUP = 0x1
 
-const _SC_PAGESIZE = 0x1e
-
 type Termios struct {
 	Iflag  uint32
 	Oflag  uint32
@@ -686,3 +684,104 @@
 	Xpixel uint16
 	Ypixel uint16
 }
+
+type Taskstats struct {
+	Version                   uint16
+	Pad_cgo_0                 [2]byte
+	Ac_exitcode               uint32
+	Ac_flag                   uint8
+	Ac_nice                   uint8
+	Pad_cgo_1                 [6]byte
+	Cpu_count                 uint64
+	Cpu_delay_total           uint64
+	Blkio_count               uint64
+	Blkio_delay_total         uint64
+	Swapin_count              uint64
+	Swapin_delay_total        uint64
+	Cpu_run_real_total        uint64
+	Cpu_run_virtual_total     uint64
+	Ac_comm                   [32]int8
+	Ac_sched                  uint8
+	Ac_pad                    [3]uint8
+	Pad_cgo_2                 [4]byte
+	Ac_uid                    uint32
+	Ac_gid                    uint32
+	Ac_pid                    uint32
+	Ac_ppid                   uint32
+	Ac_btime                  uint32
+	Pad_cgo_3                 [4]byte
+	Ac_etime                  uint64
+	Ac_utime                  uint64
+	Ac_stime                  uint64
+	Ac_minflt                 uint64
+	Ac_majflt                 uint64
+	Coremem                   uint64
+	Virtmem                   uint64
+	Hiwater_rss               uint64
+	Hiwater_vm                uint64
+	Read_char                 uint64
+	Write_char                uint64
+	Read_syscalls             uint64
+	Write_syscalls            uint64
+	Read_bytes                uint64
+	Write_bytes               uint64
+	Cancelled_write_bytes     uint64
+	Nvcsw                     uint64
+	Nivcsw                    uint64
+	Ac_utimescaled            uint64
+	Ac_stimescaled            uint64
+	Cpu_scaled_run_real_total uint64
+	Freepages_count           uint64
+	Freepages_delay_total     uint64
+}
+
+const (
+	TASKSTATS_CMD_UNSPEC                  = 0x0
+	TASKSTATS_CMD_GET                     = 0x1
+	TASKSTATS_CMD_NEW                     = 0x2
+	TASKSTATS_TYPE_UNSPEC                 = 0x0
+	TASKSTATS_TYPE_PID                    = 0x1
+	TASKSTATS_TYPE_TGID                   = 0x2
+	TASKSTATS_TYPE_STATS                  = 0x3
+	TASKSTATS_TYPE_AGGR_PID               = 0x4
+	TASKSTATS_TYPE_AGGR_TGID              = 0x5
+	TASKSTATS_TYPE_NULL                   = 0x6
+	TASKSTATS_CMD_ATTR_UNSPEC             = 0x0
+	TASKSTATS_CMD_ATTR_PID                = 0x1
+	TASKSTATS_CMD_ATTR_TGID               = 0x2
+	TASKSTATS_CMD_ATTR_REGISTER_CPUMASK   = 0x3
+	TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4
+)
+
+type Genlmsghdr struct {
+	Cmd      uint8
+	Version  uint8
+	Reserved uint16
+}
+
+const (
+	CTRL_CMD_UNSPEC            = 0x0
+	CTRL_CMD_NEWFAMILY         = 0x1
+	CTRL_CMD_DELFAMILY         = 0x2
+	CTRL_CMD_GETFAMILY         = 0x3
+	CTRL_CMD_NEWOPS            = 0x4
+	CTRL_CMD_DELOPS            = 0x5
+	CTRL_CMD_GETOPS            = 0x6
+	CTRL_CMD_NEWMCAST_GRP      = 0x7
+	CTRL_CMD_DELMCAST_GRP      = 0x8
+	CTRL_CMD_GETMCAST_GRP      = 0x9
+	CTRL_ATTR_UNSPEC           = 0x0
+	CTRL_ATTR_FAMILY_ID        = 0x1
+	CTRL_ATTR_FAMILY_NAME      = 0x2
+	CTRL_ATTR_VERSION          = 0x3
+	CTRL_ATTR_HDRSIZE          = 0x4
+	CTRL_ATTR_MAXATTR          = 0x5
+	CTRL_ATTR_OPS              = 0x6
+	CTRL_ATTR_MCAST_GROUPS     = 0x7
+	CTRL_ATTR_OP_UNSPEC        = 0x0
+	CTRL_ATTR_OP_ID            = 0x1
+	CTRL_ATTR_OP_FLAGS         = 0x2
+	CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
+	CTRL_ATTR_MCAST_GRP_NAME   = 0x1
+	CTRL_ATTR_MCAST_GRP_ID     = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
index 92aac5d..d9df087 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go
@@ -430,7 +430,7 @@
 	IFLA_LINKINFO       = 0x12
 	IFLA_NET_NS_PID     = 0x13
 	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2b
+	IFLA_MAX            = 0x2c
 	RT_SCOPE_UNIVERSE   = 0x0
 	RT_SCOPE_SITE       = 0xc8
 	RT_SCOPE_LINK       = 0xfd
@@ -618,12 +618,12 @@
 }
 
 type Utsname struct {
-	Sysname    [65]int8
-	Nodename   [65]int8
-	Release    [65]int8
-	Version    [65]int8
-	Machine    [65]int8
-	Domainname [65]int8
+	Sysname    [65]byte
+	Nodename   [65]byte
+	Release    [65]byte
+	Version    [65]byte
+	Machine    [65]byte
+	Domainname [65]byte
 }
 
 type Ustat_t struct {
@@ -672,8 +672,6 @@
 
 const PERF_IOC_FLAG_GROUP = 0x1
 
-const _SC_PAGESIZE = 0x1e
-
 type Termios struct {
 	Iflag  uint32
 	Oflag  uint32
@@ -691,3 +689,104 @@
 	Xpixel uint16
 	Ypixel uint16
 }
+
+type Taskstats struct {
+	Version                   uint16
+	Pad_cgo_0                 [2]byte
+	Ac_exitcode               uint32
+	Ac_flag                   uint8
+	Ac_nice                   uint8
+	Pad_cgo_1                 [6]byte
+	Cpu_count                 uint64
+	Cpu_delay_total           uint64
+	Blkio_count               uint64
+	Blkio_delay_total         uint64
+	Swapin_count              uint64
+	Swapin_delay_total        uint64
+	Cpu_run_real_total        uint64
+	Cpu_run_virtual_total     uint64
+	Ac_comm                   [32]int8
+	Ac_sched                  uint8
+	Ac_pad                    [3]uint8
+	Pad_cgo_2                 [4]byte
+	Ac_uid                    uint32
+	Ac_gid                    uint32
+	Ac_pid                    uint32
+	Ac_ppid                   uint32
+	Ac_btime                  uint32
+	Pad_cgo_3                 [4]byte
+	Ac_etime                  uint64
+	Ac_utime                  uint64
+	Ac_stime                  uint64
+	Ac_minflt                 uint64
+	Ac_majflt                 uint64
+	Coremem                   uint64
+	Virtmem                   uint64
+	Hiwater_rss               uint64
+	Hiwater_vm                uint64
+	Read_char                 uint64
+	Write_char                uint64
+	Read_syscalls             uint64
+	Write_syscalls            uint64
+	Read_bytes                uint64
+	Write_bytes               uint64
+	Cancelled_write_bytes     uint64
+	Nvcsw                     uint64
+	Nivcsw                    uint64
+	Ac_utimescaled            uint64
+	Ac_stimescaled            uint64
+	Cpu_scaled_run_real_total uint64
+	Freepages_count           uint64
+	Freepages_delay_total     uint64
+}
+
+const (
+	TASKSTATS_CMD_UNSPEC                  = 0x0
+	TASKSTATS_CMD_GET                     = 0x1
+	TASKSTATS_CMD_NEW                     = 0x2
+	TASKSTATS_TYPE_UNSPEC                 = 0x0
+	TASKSTATS_TYPE_PID                    = 0x1
+	TASKSTATS_TYPE_TGID                   = 0x2
+	TASKSTATS_TYPE_STATS                  = 0x3
+	TASKSTATS_TYPE_AGGR_PID               = 0x4
+	TASKSTATS_TYPE_AGGR_TGID              = 0x5
+	TASKSTATS_TYPE_NULL                   = 0x6
+	TASKSTATS_CMD_ATTR_UNSPEC             = 0x0
+	TASKSTATS_CMD_ATTR_PID                = 0x1
+	TASKSTATS_CMD_ATTR_TGID               = 0x2
+	TASKSTATS_CMD_ATTR_REGISTER_CPUMASK   = 0x3
+	TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4
+)
+
+type Genlmsghdr struct {
+	Cmd      uint8
+	Version  uint8
+	Reserved uint16
+}
+
+const (
+	CTRL_CMD_UNSPEC            = 0x0
+	CTRL_CMD_NEWFAMILY         = 0x1
+	CTRL_CMD_DELFAMILY         = 0x2
+	CTRL_CMD_GETFAMILY         = 0x3
+	CTRL_CMD_NEWOPS            = 0x4
+	CTRL_CMD_DELOPS            = 0x5
+	CTRL_CMD_GETOPS            = 0x6
+	CTRL_CMD_NEWMCAST_GRP      = 0x7
+	CTRL_CMD_DELMCAST_GRP      = 0x8
+	CTRL_CMD_GETMCAST_GRP      = 0x9
+	CTRL_ATTR_UNSPEC           = 0x0
+	CTRL_ATTR_FAMILY_ID        = 0x1
+	CTRL_ATTR_FAMILY_NAME      = 0x2
+	CTRL_ATTR_VERSION          = 0x3
+	CTRL_ATTR_HDRSIZE          = 0x4
+	CTRL_ATTR_MAXATTR          = 0x5
+	CTRL_ATTR_OPS              = 0x6
+	CTRL_ATTR_MCAST_GROUPS     = 0x7
+	CTRL_ATTR_OP_UNSPEC        = 0x0
+	CTRL_ATTR_OP_ID            = 0x1
+	CTRL_ATTR_OP_FLAGS         = 0x2
+	CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
+	CTRL_ATTR_MCAST_GRP_NAME   = 0x1
+	CTRL_ATTR_MCAST_GRP_ID     = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
index 623f581..15e6b4b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go
@@ -430,7 +430,7 @@
 	IFLA_LINKINFO       = 0x12
 	IFLA_NET_NS_PID     = 0x13
 	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2b
+	IFLA_MAX            = 0x2c
 	RT_SCOPE_UNIVERSE   = 0x0
 	RT_SCOPE_SITE       = 0xc8
 	RT_SCOPE_LINK       = 0xfd
@@ -618,12 +618,12 @@
 }
 
 type Utsname struct {
-	Sysname    [65]int8
-	Nodename   [65]int8
-	Release    [65]int8
-	Version    [65]int8
-	Machine    [65]int8
-	Domainname [65]int8
+	Sysname    [65]byte
+	Nodename   [65]byte
+	Release    [65]byte
+	Version    [65]byte
+	Machine    [65]byte
+	Domainname [65]byte
 }
 
 type Ustat_t struct {
@@ -672,8 +672,6 @@
 
 const PERF_IOC_FLAG_GROUP = 0x1
 
-const _SC_PAGESIZE = 0x1e
-
 type Termios struct {
 	Iflag  uint32
 	Oflag  uint32
@@ -691,3 +689,104 @@
 	Xpixel uint16
 	Ypixel uint16
 }
+
+type Taskstats struct {
+	Version                   uint16
+	Pad_cgo_0                 [2]byte
+	Ac_exitcode               uint32
+	Ac_flag                   uint8
+	Ac_nice                   uint8
+	Pad_cgo_1                 [6]byte
+	Cpu_count                 uint64
+	Cpu_delay_total           uint64
+	Blkio_count               uint64
+	Blkio_delay_total         uint64
+	Swapin_count              uint64
+	Swapin_delay_total        uint64
+	Cpu_run_real_total        uint64
+	Cpu_run_virtual_total     uint64
+	Ac_comm                   [32]int8
+	Ac_sched                  uint8
+	Ac_pad                    [3]uint8
+	Pad_cgo_2                 [4]byte
+	Ac_uid                    uint32
+	Ac_gid                    uint32
+	Ac_pid                    uint32
+	Ac_ppid                   uint32
+	Ac_btime                  uint32
+	Pad_cgo_3                 [4]byte
+	Ac_etime                  uint64
+	Ac_utime                  uint64
+	Ac_stime                  uint64
+	Ac_minflt                 uint64
+	Ac_majflt                 uint64
+	Coremem                   uint64
+	Virtmem                   uint64
+	Hiwater_rss               uint64
+	Hiwater_vm                uint64
+	Read_char                 uint64
+	Write_char                uint64
+	Read_syscalls             uint64
+	Write_syscalls            uint64
+	Read_bytes                uint64
+	Write_bytes               uint64
+	Cancelled_write_bytes     uint64
+	Nvcsw                     uint64
+	Nivcsw                    uint64
+	Ac_utimescaled            uint64
+	Ac_stimescaled            uint64
+	Cpu_scaled_run_real_total uint64
+	Freepages_count           uint64
+	Freepages_delay_total     uint64
+}
+
+const (
+	TASKSTATS_CMD_UNSPEC                  = 0x0
+	TASKSTATS_CMD_GET                     = 0x1
+	TASKSTATS_CMD_NEW                     = 0x2
+	TASKSTATS_TYPE_UNSPEC                 = 0x0
+	TASKSTATS_TYPE_PID                    = 0x1
+	TASKSTATS_TYPE_TGID                   = 0x2
+	TASKSTATS_TYPE_STATS                  = 0x3
+	TASKSTATS_TYPE_AGGR_PID               = 0x4
+	TASKSTATS_TYPE_AGGR_TGID              = 0x5
+	TASKSTATS_TYPE_NULL                   = 0x6
+	TASKSTATS_CMD_ATTR_UNSPEC             = 0x0
+	TASKSTATS_CMD_ATTR_PID                = 0x1
+	TASKSTATS_CMD_ATTR_TGID               = 0x2
+	TASKSTATS_CMD_ATTR_REGISTER_CPUMASK   = 0x3
+	TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4
+)
+
+type Genlmsghdr struct {
+	Cmd      uint8
+	Version  uint8
+	Reserved uint16
+}
+
+const (
+	CTRL_CMD_UNSPEC            = 0x0
+	CTRL_CMD_NEWFAMILY         = 0x1
+	CTRL_CMD_DELFAMILY         = 0x2
+	CTRL_CMD_GETFAMILY         = 0x3
+	CTRL_CMD_NEWOPS            = 0x4
+	CTRL_CMD_DELOPS            = 0x5
+	CTRL_CMD_GETOPS            = 0x6
+	CTRL_CMD_NEWMCAST_GRP      = 0x7
+	CTRL_CMD_DELMCAST_GRP      = 0x8
+	CTRL_CMD_GETMCAST_GRP      = 0x9
+	CTRL_ATTR_UNSPEC           = 0x0
+	CTRL_ATTR_FAMILY_ID        = 0x1
+	CTRL_ATTR_FAMILY_NAME      = 0x2
+	CTRL_ATTR_VERSION          = 0x3
+	CTRL_ATTR_HDRSIZE          = 0x4
+	CTRL_ATTR_MAXATTR          = 0x5
+	CTRL_ATTR_OPS              = 0x6
+	CTRL_ATTR_MCAST_GROUPS     = 0x7
+	CTRL_ATTR_OP_UNSPEC        = 0x0
+	CTRL_ATTR_OP_ID            = 0x1
+	CTRL_ATTR_OP_FLAGS         = 0x2
+	CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
+	CTRL_ATTR_MCAST_GRP_NAME   = 0x1
+	CTRL_ATTR_MCAST_GRP_ID     = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
index 56598a1..b6c2d32 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go
@@ -428,7 +428,7 @@
 	IFLA_LINKINFO       = 0x12
 	IFLA_NET_NS_PID     = 0x13
 	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2b
+	IFLA_MAX            = 0x2c
 	RT_SCOPE_UNIVERSE   = 0x0
 	RT_SCOPE_SITE       = 0xc8
 	RT_SCOPE_LINK       = 0xfd
@@ -614,12 +614,12 @@
 }
 
 type Utsname struct {
-	Sysname    [65]int8
-	Nodename   [65]int8
-	Release    [65]int8
-	Version    [65]int8
-	Machine    [65]int8
-	Domainname [65]int8
+	Sysname    [65]byte
+	Nodename   [65]byte
+	Release    [65]byte
+	Version    [65]byte
+	Machine    [65]byte
+	Domainname [65]byte
 }
 
 type Ustat_t struct {
@@ -667,8 +667,6 @@
 
 const PERF_IOC_FLAG_GROUP = 0x1
 
-const _SC_PAGESIZE = 0x1e
-
 type Termios struct {
 	Iflag  uint32
 	Oflag  uint32
@@ -686,3 +684,104 @@
 	Xpixel uint16
 	Ypixel uint16
 }
+
+type Taskstats struct {
+	Version                   uint16
+	Pad_cgo_0                 [2]byte
+	Ac_exitcode               uint32
+	Ac_flag                   uint8
+	Ac_nice                   uint8
+	Pad_cgo_1                 [6]byte
+	Cpu_count                 uint64
+	Cpu_delay_total           uint64
+	Blkio_count               uint64
+	Blkio_delay_total         uint64
+	Swapin_count              uint64
+	Swapin_delay_total        uint64
+	Cpu_run_real_total        uint64
+	Cpu_run_virtual_total     uint64
+	Ac_comm                   [32]int8
+	Ac_sched                  uint8
+	Ac_pad                    [3]uint8
+	Pad_cgo_2                 [4]byte
+	Ac_uid                    uint32
+	Ac_gid                    uint32
+	Ac_pid                    uint32
+	Ac_ppid                   uint32
+	Ac_btime                  uint32
+	Pad_cgo_3                 [4]byte
+	Ac_etime                  uint64
+	Ac_utime                  uint64
+	Ac_stime                  uint64
+	Ac_minflt                 uint64
+	Ac_majflt                 uint64
+	Coremem                   uint64
+	Virtmem                   uint64
+	Hiwater_rss               uint64
+	Hiwater_vm                uint64
+	Read_char                 uint64
+	Write_char                uint64
+	Read_syscalls             uint64
+	Write_syscalls            uint64
+	Read_bytes                uint64
+	Write_bytes               uint64
+	Cancelled_write_bytes     uint64
+	Nvcsw                     uint64
+	Nivcsw                    uint64
+	Ac_utimescaled            uint64
+	Ac_stimescaled            uint64
+	Cpu_scaled_run_real_total uint64
+	Freepages_count           uint64
+	Freepages_delay_total     uint64
+}
+
+const (
+	TASKSTATS_CMD_UNSPEC                  = 0x0
+	TASKSTATS_CMD_GET                     = 0x1
+	TASKSTATS_CMD_NEW                     = 0x2
+	TASKSTATS_TYPE_UNSPEC                 = 0x0
+	TASKSTATS_TYPE_PID                    = 0x1
+	TASKSTATS_TYPE_TGID                   = 0x2
+	TASKSTATS_TYPE_STATS                  = 0x3
+	TASKSTATS_TYPE_AGGR_PID               = 0x4
+	TASKSTATS_TYPE_AGGR_TGID              = 0x5
+	TASKSTATS_TYPE_NULL                   = 0x6
+	TASKSTATS_CMD_ATTR_UNSPEC             = 0x0
+	TASKSTATS_CMD_ATTR_PID                = 0x1
+	TASKSTATS_CMD_ATTR_TGID               = 0x2
+	TASKSTATS_CMD_ATTR_REGISTER_CPUMASK   = 0x3
+	TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4
+)
+
+type Genlmsghdr struct {
+	Cmd      uint8
+	Version  uint8
+	Reserved uint16
+}
+
+const (
+	CTRL_CMD_UNSPEC            = 0x0
+	CTRL_CMD_NEWFAMILY         = 0x1
+	CTRL_CMD_DELFAMILY         = 0x2
+	CTRL_CMD_GETFAMILY         = 0x3
+	CTRL_CMD_NEWOPS            = 0x4
+	CTRL_CMD_DELOPS            = 0x5
+	CTRL_CMD_GETOPS            = 0x6
+	CTRL_CMD_NEWMCAST_GRP      = 0x7
+	CTRL_CMD_DELMCAST_GRP      = 0x8
+	CTRL_CMD_GETMCAST_GRP      = 0x9
+	CTRL_ATTR_UNSPEC           = 0x0
+	CTRL_ATTR_FAMILY_ID        = 0x1
+	CTRL_ATTR_FAMILY_NAME      = 0x2
+	CTRL_ATTR_VERSION          = 0x3
+	CTRL_ATTR_HDRSIZE          = 0x4
+	CTRL_ATTR_MAXATTR          = 0x5
+	CTRL_ATTR_OPS              = 0x6
+	CTRL_ATTR_MCAST_GROUPS     = 0x7
+	CTRL_ATTR_OP_UNSPEC        = 0x0
+	CTRL_ATTR_OP_ID            = 0x1
+	CTRL_ATTR_OP_FLAGS         = 0x2
+	CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
+	CTRL_ATTR_MCAST_GRP_NAME   = 0x1
+	CTRL_ATTR_MCAST_GRP_ID     = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
index acc7c81..3803e10 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go
@@ -431,7 +431,7 @@
 	IFLA_LINKINFO       = 0x12
 	IFLA_NET_NS_PID     = 0x13
 	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2b
+	IFLA_MAX            = 0x2c
 	RT_SCOPE_UNIVERSE   = 0x0
 	RT_SCOPE_SITE       = 0xc8
 	RT_SCOPE_LINK       = 0xfd
@@ -625,12 +625,12 @@
 }
 
 type Utsname struct {
-	Sysname    [65]uint8
-	Nodename   [65]uint8
-	Release    [65]uint8
-	Version    [65]uint8
-	Machine    [65]uint8
-	Domainname [65]uint8
+	Sysname    [65]byte
+	Nodename   [65]byte
+	Release    [65]byte
+	Version    [65]byte
+	Machine    [65]byte
+	Domainname [65]byte
 }
 
 type Ustat_t struct {
@@ -680,8 +680,6 @@
 
 const PERF_IOC_FLAG_GROUP = 0x1
 
-const _SC_PAGESIZE = 0x1e
-
 type Termios struct {
 	Iflag  uint32
 	Oflag  uint32
@@ -699,3 +697,104 @@
 	Xpixel uint16
 	Ypixel uint16
 }
+
+type Taskstats struct {
+	Version                   uint16
+	Pad_cgo_0                 [2]byte
+	Ac_exitcode               uint32
+	Ac_flag                   uint8
+	Ac_nice                   uint8
+	Pad_cgo_1                 [6]byte
+	Cpu_count                 uint64
+	Cpu_delay_total           uint64
+	Blkio_count               uint64
+	Blkio_delay_total         uint64
+	Swapin_count              uint64
+	Swapin_delay_total        uint64
+	Cpu_run_real_total        uint64
+	Cpu_run_virtual_total     uint64
+	Ac_comm                   [32]uint8
+	Ac_sched                  uint8
+	Ac_pad                    [3]uint8
+	Pad_cgo_2                 [4]byte
+	Ac_uid                    uint32
+	Ac_gid                    uint32
+	Ac_pid                    uint32
+	Ac_ppid                   uint32
+	Ac_btime                  uint32
+	Pad_cgo_3                 [4]byte
+	Ac_etime                  uint64
+	Ac_utime                  uint64
+	Ac_stime                  uint64
+	Ac_minflt                 uint64
+	Ac_majflt                 uint64
+	Coremem                   uint64
+	Virtmem                   uint64
+	Hiwater_rss               uint64
+	Hiwater_vm                uint64
+	Read_char                 uint64
+	Write_char                uint64
+	Read_syscalls             uint64
+	Write_syscalls            uint64
+	Read_bytes                uint64
+	Write_bytes               uint64
+	Cancelled_write_bytes     uint64
+	Nvcsw                     uint64
+	Nivcsw                    uint64
+	Ac_utimescaled            uint64
+	Ac_stimescaled            uint64
+	Cpu_scaled_run_real_total uint64
+	Freepages_count           uint64
+	Freepages_delay_total     uint64
+}
+
+const (
+	TASKSTATS_CMD_UNSPEC                  = 0x0
+	TASKSTATS_CMD_GET                     = 0x1
+	TASKSTATS_CMD_NEW                     = 0x2
+	TASKSTATS_TYPE_UNSPEC                 = 0x0
+	TASKSTATS_TYPE_PID                    = 0x1
+	TASKSTATS_TYPE_TGID                   = 0x2
+	TASKSTATS_TYPE_STATS                  = 0x3
+	TASKSTATS_TYPE_AGGR_PID               = 0x4
+	TASKSTATS_TYPE_AGGR_TGID              = 0x5
+	TASKSTATS_TYPE_NULL                   = 0x6
+	TASKSTATS_CMD_ATTR_UNSPEC             = 0x0
+	TASKSTATS_CMD_ATTR_PID                = 0x1
+	TASKSTATS_CMD_ATTR_TGID               = 0x2
+	TASKSTATS_CMD_ATTR_REGISTER_CPUMASK   = 0x3
+	TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4
+)
+
+type Genlmsghdr struct {
+	Cmd      uint8
+	Version  uint8
+	Reserved uint16
+}
+
+const (
+	CTRL_CMD_UNSPEC            = 0x0
+	CTRL_CMD_NEWFAMILY         = 0x1
+	CTRL_CMD_DELFAMILY         = 0x2
+	CTRL_CMD_GETFAMILY         = 0x3
+	CTRL_CMD_NEWOPS            = 0x4
+	CTRL_CMD_DELOPS            = 0x5
+	CTRL_CMD_GETOPS            = 0x6
+	CTRL_CMD_NEWMCAST_GRP      = 0x7
+	CTRL_CMD_DELMCAST_GRP      = 0x8
+	CTRL_CMD_GETMCAST_GRP      = 0x9
+	CTRL_ATTR_UNSPEC           = 0x0
+	CTRL_ATTR_FAMILY_ID        = 0x1
+	CTRL_ATTR_FAMILY_NAME      = 0x2
+	CTRL_ATTR_VERSION          = 0x3
+	CTRL_ATTR_HDRSIZE          = 0x4
+	CTRL_ATTR_MAXATTR          = 0x5
+	CTRL_ATTR_OPS              = 0x6
+	CTRL_ATTR_MCAST_GROUPS     = 0x7
+	CTRL_ATTR_OP_UNSPEC        = 0x0
+	CTRL_ATTR_OP_ID            = 0x1
+	CTRL_ATTR_OP_FLAGS         = 0x2
+	CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
+	CTRL_ATTR_MCAST_GRP_NAME   = 0x1
+	CTRL_ATTR_MCAST_GRP_ID     = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
index b348885..7ef31fe 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go
@@ -431,7 +431,7 @@
 	IFLA_LINKINFO       = 0x12
 	IFLA_NET_NS_PID     = 0x13
 	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2b
+	IFLA_MAX            = 0x2c
 	RT_SCOPE_UNIVERSE   = 0x0
 	RT_SCOPE_SITE       = 0xc8
 	RT_SCOPE_LINK       = 0xfd
@@ -625,12 +625,12 @@
 }
 
 type Utsname struct {
-	Sysname    [65]uint8
-	Nodename   [65]uint8
-	Release    [65]uint8
-	Version    [65]uint8
-	Machine    [65]uint8
-	Domainname [65]uint8
+	Sysname    [65]byte
+	Nodename   [65]byte
+	Release    [65]byte
+	Version    [65]byte
+	Machine    [65]byte
+	Domainname [65]byte
 }
 
 type Ustat_t struct {
@@ -680,8 +680,6 @@
 
 const PERF_IOC_FLAG_GROUP = 0x1
 
-const _SC_PAGESIZE = 0x1e
-
 type Termios struct {
 	Iflag  uint32
 	Oflag  uint32
@@ -699,3 +697,104 @@
 	Xpixel uint16
 	Ypixel uint16
 }
+
+type Taskstats struct {
+	Version                   uint16
+	Pad_cgo_0                 [2]byte
+	Ac_exitcode               uint32
+	Ac_flag                   uint8
+	Ac_nice                   uint8
+	Pad_cgo_1                 [6]byte
+	Cpu_count                 uint64
+	Cpu_delay_total           uint64
+	Blkio_count               uint64
+	Blkio_delay_total         uint64
+	Swapin_count              uint64
+	Swapin_delay_total        uint64
+	Cpu_run_real_total        uint64
+	Cpu_run_virtual_total     uint64
+	Ac_comm                   [32]uint8
+	Ac_sched                  uint8
+	Ac_pad                    [3]uint8
+	Pad_cgo_2                 [4]byte
+	Ac_uid                    uint32
+	Ac_gid                    uint32
+	Ac_pid                    uint32
+	Ac_ppid                   uint32
+	Ac_btime                  uint32
+	Pad_cgo_3                 [4]byte
+	Ac_etime                  uint64
+	Ac_utime                  uint64
+	Ac_stime                  uint64
+	Ac_minflt                 uint64
+	Ac_majflt                 uint64
+	Coremem                   uint64
+	Virtmem                   uint64
+	Hiwater_rss               uint64
+	Hiwater_vm                uint64
+	Read_char                 uint64
+	Write_char                uint64
+	Read_syscalls             uint64
+	Write_syscalls            uint64
+	Read_bytes                uint64
+	Write_bytes               uint64
+	Cancelled_write_bytes     uint64
+	Nvcsw                     uint64
+	Nivcsw                    uint64
+	Ac_utimescaled            uint64
+	Ac_stimescaled            uint64
+	Cpu_scaled_run_real_total uint64
+	Freepages_count           uint64
+	Freepages_delay_total     uint64
+}
+
+const (
+	TASKSTATS_CMD_UNSPEC                  = 0x0
+	TASKSTATS_CMD_GET                     = 0x1
+	TASKSTATS_CMD_NEW                     = 0x2
+	TASKSTATS_TYPE_UNSPEC                 = 0x0
+	TASKSTATS_TYPE_PID                    = 0x1
+	TASKSTATS_TYPE_TGID                   = 0x2
+	TASKSTATS_TYPE_STATS                  = 0x3
+	TASKSTATS_TYPE_AGGR_PID               = 0x4
+	TASKSTATS_TYPE_AGGR_TGID              = 0x5
+	TASKSTATS_TYPE_NULL                   = 0x6
+	TASKSTATS_CMD_ATTR_UNSPEC             = 0x0
+	TASKSTATS_CMD_ATTR_PID                = 0x1
+	TASKSTATS_CMD_ATTR_TGID               = 0x2
+	TASKSTATS_CMD_ATTR_REGISTER_CPUMASK   = 0x3
+	TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4
+)
+
+type Genlmsghdr struct {
+	Cmd      uint8
+	Version  uint8
+	Reserved uint16
+}
+
+const (
+	CTRL_CMD_UNSPEC            = 0x0
+	CTRL_CMD_NEWFAMILY         = 0x1
+	CTRL_CMD_DELFAMILY         = 0x2
+	CTRL_CMD_GETFAMILY         = 0x3
+	CTRL_CMD_NEWOPS            = 0x4
+	CTRL_CMD_DELOPS            = 0x5
+	CTRL_CMD_GETOPS            = 0x6
+	CTRL_CMD_NEWMCAST_GRP      = 0x7
+	CTRL_CMD_DELMCAST_GRP      = 0x8
+	CTRL_CMD_GETMCAST_GRP      = 0x9
+	CTRL_ATTR_UNSPEC           = 0x0
+	CTRL_ATTR_FAMILY_ID        = 0x1
+	CTRL_ATTR_FAMILY_NAME      = 0x2
+	CTRL_ATTR_VERSION          = 0x3
+	CTRL_ATTR_HDRSIZE          = 0x4
+	CTRL_ATTR_MAXATTR          = 0x5
+	CTRL_ATTR_OPS              = 0x6
+	CTRL_ATTR_MCAST_GROUPS     = 0x7
+	CTRL_ATTR_OP_UNSPEC        = 0x0
+	CTRL_ATTR_OP_ID            = 0x1
+	CTRL_ATTR_OP_FLAGS         = 0x2
+	CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
+	CTRL_ATTR_MCAST_GRP_NAME   = 0x1
+	CTRL_ATTR_MCAST_GRP_ID     = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
index a706e2f..cb194f4 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go
@@ -430,7 +430,7 @@
 	IFLA_LINKINFO       = 0x12
 	IFLA_NET_NS_PID     = 0x13
 	IFLA_IFALIAS        = 0x14
-	IFLA_MAX            = 0x2b
+	IFLA_MAX            = 0x2c
 	RT_SCOPE_UNIVERSE   = 0x0
 	RT_SCOPE_SITE       = 0xc8
 	RT_SCOPE_LINK       = 0xfd
@@ -642,12 +642,12 @@
 }
 
 type Utsname struct {
-	Sysname    [65]int8
-	Nodename   [65]int8
-	Release    [65]int8
-	Version    [65]int8
-	Machine    [65]int8
-	Domainname [65]int8
+	Sysname    [65]byte
+	Nodename   [65]byte
+	Release    [65]byte
+	Version    [65]byte
+	Machine    [65]byte
+	Domainname [65]byte
 }
 
 type Ustat_t struct {
@@ -697,8 +697,6 @@
 
 const PERF_IOC_FLAG_GROUP = 0x1
 
-const _SC_PAGESIZE = 0x1e
-
 type Termios struct {
 	Iflag  uint32
 	Oflag  uint32
@@ -716,3 +714,104 @@
 	Xpixel uint16
 	Ypixel uint16
 }
+
+type Taskstats struct {
+	Version                   uint16
+	_                         [2]byte
+	Ac_exitcode               uint32
+	Ac_flag                   uint8
+	Ac_nice                   uint8
+	_                         [6]byte
+	Cpu_count                 uint64
+	Cpu_delay_total           uint64
+	Blkio_count               uint64
+	Blkio_delay_total         uint64
+	Swapin_count              uint64
+	Swapin_delay_total        uint64
+	Cpu_run_real_total        uint64
+	Cpu_run_virtual_total     uint64
+	Ac_comm                   [32]int8
+	Ac_sched                  uint8
+	Ac_pad                    [3]uint8
+	_                         [4]byte
+	Ac_uid                    uint32
+	Ac_gid                    uint32
+	Ac_pid                    uint32
+	Ac_ppid                   uint32
+	Ac_btime                  uint32
+	_                         [4]byte
+	Ac_etime                  uint64
+	Ac_utime                  uint64
+	Ac_stime                  uint64
+	Ac_minflt                 uint64
+	Ac_majflt                 uint64
+	Coremem                   uint64
+	Virtmem                   uint64
+	Hiwater_rss               uint64
+	Hiwater_vm                uint64
+	Read_char                 uint64
+	Write_char                uint64
+	Read_syscalls             uint64
+	Write_syscalls            uint64
+	Read_bytes                uint64
+	Write_bytes               uint64
+	Cancelled_write_bytes     uint64
+	Nvcsw                     uint64
+	Nivcsw                    uint64
+	Ac_utimescaled            uint64
+	Ac_stimescaled            uint64
+	Cpu_scaled_run_real_total uint64
+	Freepages_count           uint64
+	Freepages_delay_total     uint64
+}
+
+const (
+	TASKSTATS_CMD_UNSPEC                  = 0x0
+	TASKSTATS_CMD_GET                     = 0x1
+	TASKSTATS_CMD_NEW                     = 0x2
+	TASKSTATS_TYPE_UNSPEC                 = 0x0
+	TASKSTATS_TYPE_PID                    = 0x1
+	TASKSTATS_TYPE_TGID                   = 0x2
+	TASKSTATS_TYPE_STATS                  = 0x3
+	TASKSTATS_TYPE_AGGR_PID               = 0x4
+	TASKSTATS_TYPE_AGGR_TGID              = 0x5
+	TASKSTATS_TYPE_NULL                   = 0x6
+	TASKSTATS_CMD_ATTR_UNSPEC             = 0x0
+	TASKSTATS_CMD_ATTR_PID                = 0x1
+	TASKSTATS_CMD_ATTR_TGID               = 0x2
+	TASKSTATS_CMD_ATTR_REGISTER_CPUMASK   = 0x3
+	TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4
+)
+
+type Genlmsghdr struct {
+	Cmd      uint8
+	Version  uint8
+	Reserved uint16
+}
+
+const (
+	CTRL_CMD_UNSPEC            = 0x0
+	CTRL_CMD_NEWFAMILY         = 0x1
+	CTRL_CMD_DELFAMILY         = 0x2
+	CTRL_CMD_GETFAMILY         = 0x3
+	CTRL_CMD_NEWOPS            = 0x4
+	CTRL_CMD_DELOPS            = 0x5
+	CTRL_CMD_GETOPS            = 0x6
+	CTRL_CMD_NEWMCAST_GRP      = 0x7
+	CTRL_CMD_DELMCAST_GRP      = 0x8
+	CTRL_CMD_GETMCAST_GRP      = 0x9
+	CTRL_ATTR_UNSPEC           = 0x0
+	CTRL_ATTR_FAMILY_ID        = 0x1
+	CTRL_ATTR_FAMILY_NAME      = 0x2
+	CTRL_ATTR_VERSION          = 0x3
+	CTRL_ATTR_HDRSIZE          = 0x4
+	CTRL_ATTR_MAXATTR          = 0x5
+	CTRL_ATTR_OPS              = 0x6
+	CTRL_ATTR_MCAST_GROUPS     = 0x7
+	CTRL_ATTR_OP_UNSPEC        = 0x0
+	CTRL_ATTR_OP_ID            = 0x1
+	CTRL_ATTR_OP_FLAGS         = 0x2
+	CTRL_ATTR_MCAST_GRP_UNSPEC = 0x0
+	CTRL_ATTR_MCAST_GRP_NAME   = 0x1
+	CTRL_ATTR_MCAST_GRP_ID     = 0x2
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
index 22bdab9..9dbbb1c 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go
@@ -601,12 +601,12 @@
 }
 
 type Utsname struct {
-	Sysname    [65]int8
-	Nodename   [65]int8
-	Release    [65]int8
-	Version    [65]int8
-	Machine    [65]int8
-	Domainname [65]int8
+	Sysname    [65]byte
+	Nodename   [65]byte
+	Release    [65]byte
+	Version    [65]byte
+	Machine    [65]byte
+	Domainname [65]byte
 }
 
 type Ustat_t struct {
@@ -652,8 +652,6 @@
 	X__val [16]uint64
 }
 
-const _SC_PAGESIZE = 0x1e
-
 type Termios struct {
 	Iflag  uint32
 	Oflag  uint32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
index 42f99c0..dfe446b 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go
@@ -1,5 +1,5 @@
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_netbsd.go
+// cgo -godefs types_netbsd.go | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
 
 // +build 386,netbsd
 
@@ -387,6 +387,25 @@
 	AT_SYMLINK_NOFOLLOW = 0x200
 )
 
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR    = 0x8
+	POLLHUP    = 0x10
+	POLLIN     = 0x1
+	POLLNVAL   = 0x20
+	POLLOUT    = 0x4
+	POLLPRI    = 0x2
+	POLLRDBAND = 0x80
+	POLLRDNORM = 0x40
+	POLLWRBAND = 0x100
+	POLLWRNORM = 0x4
+)
+
 type Sysctlnode struct {
 	Flags           uint32
 	Num             int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
index ff290ba..1498c23 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go
@@ -1,5 +1,5 @@
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_netbsd.go
+// cgo -godefs types_netbsd.go | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
 
 // +build amd64,netbsd
 
@@ -394,6 +394,25 @@
 	AT_SYMLINK_NOFOLLOW = 0x200
 )
 
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR    = 0x8
+	POLLHUP    = 0x10
+	POLLIN     = 0x1
+	POLLNVAL   = 0x20
+	POLLOUT    = 0x4
+	POLLPRI    = 0x2
+	POLLRDBAND = 0x80
+	POLLRDNORM = 0x40
+	POLLWRBAND = 0x100
+	POLLWRNORM = 0x4
+)
+
 type Sysctlnode struct {
 	Flags           uint32
 	Num             int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
index 66dbd7c..d6711ce 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go
@@ -1,5 +1,5 @@
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_netbsd.go
+// cgo -godefs types_netbsd.go | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
 
 // +build arm,netbsd
 
@@ -392,6 +392,25 @@
 	AT_SYMLINK_NOFOLLOW = 0x200
 )
 
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR    = 0x8
+	POLLHUP    = 0x10
+	POLLIN     = 0x1
+	POLLNVAL   = 0x20
+	POLLOUT    = 0x4
+	POLLPRI    = 0x2
+	POLLRDBAND = 0x80
+	POLLRDNORM = 0x40
+	POLLWRBAND = 0x100
+	POLLWRNORM = 0x4
+)
+
 type Sysctlnode struct {
 	Flags           uint32
 	Num             int32
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
index 20fc9f4..af295c3 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go
@@ -1,5 +1,5 @@
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_openbsd.go
+// cgo -godefs types_openbsd.go | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
 
 // +build 386,openbsd
 
@@ -444,3 +444,22 @@
 	AT_FDCWD            = -0x64
 	AT_SYMLINK_NOFOLLOW = 0x2
 )
+
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR    = 0x8
+	POLLHUP    = 0x10
+	POLLIN     = 0x1
+	POLLNVAL   = 0x20
+	POLLOUT    = 0x4
+	POLLPRI    = 0x2
+	POLLRDBAND = 0x80
+	POLLRDNORM = 0x40
+	POLLWRBAND = 0x100
+	POLLWRNORM = 0x4
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
index 46fe949..ae153e7 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go
@@ -1,5 +1,5 @@
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_openbsd.go
+// cgo -godefs types_openbsd.go | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
 
 // +build amd64,openbsd
 
@@ -451,3 +451,22 @@
 	AT_FDCWD            = -0x64
 	AT_SYMLINK_NOFOLLOW = 0x2
 )
+
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR    = 0x8
+	POLLHUP    = 0x10
+	POLLIN     = 0x1
+	POLLNVAL   = 0x20
+	POLLOUT    = 0x4
+	POLLPRI    = 0x2
+	POLLRDBAND = 0x80
+	POLLRDNORM = 0x40
+	POLLWRBAND = 0x100
+	POLLWRNORM = 0x4
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
index 62e1f7c..35bb619 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go
@@ -1,5 +1,5 @@
-// Created by cgo -godefs - DO NOT EDIT
-// cgo -godefs types_openbsd.go
+// cgo -godefs types_openbsd.go | go run mkpost.go
+// Code generated by the command above; see README.md. DO NOT EDIT.
 
 // +build arm,openbsd
 
@@ -437,3 +437,22 @@
 	AT_FDCWD            = -0x64
 	AT_SYMLINK_NOFOLLOW = 0x2
 )
+
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR    = 0x8
+	POLLHUP    = 0x10
+	POLLIN     = 0x1
+	POLLNVAL   = 0x20
+	POLLOUT    = 0x4
+	POLLPRI    = 0x2
+	POLLRDBAND = 0x80
+	POLLRDNORM = 0x40
+	POLLWRBAND = 0x100
+	POLLWRNORM = 0x4
+)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
index 92336f9..d445452 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go
@@ -263,11 +263,11 @@
 }
 
 type Utsname struct {
-	Sysname  [257]int8
-	Nodename [257]int8
-	Release  [257]int8
-	Version  [257]int8
-	Machine  [257]int8
+	Sysname  [257]byte
+	Nodename [257]byte
+	Release  [257]byte
+	Version  [257]byte
+	Machine  [257]byte
 }
 
 type Ustat_t struct {
@@ -413,8 +413,6 @@
 	Pad_cgo_0 [2]byte
 }
 
-const _SC_PAGESIZE = 0xb
-
 type Termios struct {
 	Iflag     uint32
 	Oflag     uint32
@@ -440,3 +438,22 @@
 	Xpixel uint16
 	Ypixel uint16
 }
+
+type PollFd struct {
+	Fd      int32
+	Events  int16
+	Revents int16
+}
+
+const (
+	POLLERR    = 0x8
+	POLLHUP    = 0x10
+	POLLIN     = 0x1
+	POLLNVAL   = 0x20
+	POLLOUT    = 0x4
+	POLLPRI    = 0x2
+	POLLRDBAND = 0x80
+	POLLRDNORM = 0x40
+	POLLWRBAND = 0x100
+	POLLWRNORM = 0x4
+)
diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go
index e77a370..e92c05b 100644
--- a/vendor/golang.org/x/sys/windows/dll_windows.go
+++ b/vendor/golang.org/x/sys/windows/dll_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors.  All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -116,7 +116,7 @@
 
 //go:uintptrescapes
 
-// Call executes procedure p with arguments a. It will panic, if more then 15 arguments
+// Call executes procedure p with arguments a. It will panic, if more than 15 arguments
 // are supplied.
 //
 // The returned error is always non-nil, constructed from the result of GetLastError.
@@ -289,6 +289,7 @@
 
 // Addr returns the address of the procedure represented by p.
 // The return value can be passed to Syscall to run the procedure.
+// It will panic if the procedure cannot be found.
 func (p *LazyProc) Addr() uintptr {
 	p.mustFind()
 	return p.proc.Addr()
@@ -296,8 +297,8 @@
 
 //go:uintptrescapes
 
-// Call executes procedure p with arguments a. It will panic, if more then 15 arguments
-// are supplied.
+// Call executes procedure p with arguments a. It will panic, if more than 15 arguments
+// are supplied. It will also panic if the procedure cannot be found.
 //
 // The returned error is always non-nil, constructed from the result of GetLastError.
 // Callers must inspect the primary return value to decide whether an error occurred
diff --git a/vendor/golang.org/x/sys/windows/env_unset.go b/vendor/golang.org/x/sys/windows/env_unset.go
index 4ed03ae..b712c66 100644
--- a/vendor/golang.org/x/sys/windows/env_unset.go
+++ b/vendor/golang.org/x/sys/windows/env_unset.go
@@ -1,4 +1,4 @@
-// Copyright 2014 The Go Authors.  All rights reserved.
+// Copyright 2014 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/windows/env_windows.go b/vendor/golang.org/x/sys/windows/env_windows.go
index a9d8ef4..e829238 100644
--- a/vendor/golang.org/x/sys/windows/env_windows.go
+++ b/vendor/golang.org/x/sys/windows/env_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2010 The Go Authors.  All rights reserved.
+// Copyright 2010 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/windows/memory_windows.go b/vendor/golang.org/x/sys/windows/memory_windows.go
index f63e899..f80a420 100644
--- a/vendor/golang.org/x/sys/windows/memory_windows.go
+++ b/vendor/golang.org/x/sys/windows/memory_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2017 The Go Authors.  All rights reserved.
+// Copyright 2017 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go
index e1c88c9..fb7db0e 100644
--- a/vendor/golang.org/x/sys/windows/mksyscall.go
+++ b/vendor/golang.org/x/sys/windows/mksyscall.go
@@ -1,4 +1,4 @@
-// Copyright 2009 The Go Authors.  All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/windows/race.go b/vendor/golang.org/x/sys/windows/race.go
index 343e18a..a74e3e2 100644
--- a/vendor/golang.org/x/sys/windows/race.go
+++ b/vendor/golang.org/x/sys/windows/race.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors.  All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/windows/race0.go b/vendor/golang.org/x/sys/windows/race0.go
index 17af843..e44a3cb 100644
--- a/vendor/golang.org/x/sys/windows/race0.go
+++ b/vendor/golang.org/x/sys/windows/race0.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors.  All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go
index ca09bdd..d8e7ff2 100644
--- a/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/vendor/golang.org/x/sys/windows/security_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2012 The Go Authors.  All rights reserved.
+// Copyright 2012 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/windows/svc/go12.go b/vendor/golang.org/x/sys/windows/svc/go12.go
index 6f0a924..cd8b913 100644
--- a/vendor/golang.org/x/sys/windows/svc/go12.go
+++ b/vendor/golang.org/x/sys/windows/svc/go12.go
@@ -1,4 +1,4 @@
-// Copyright 2014 The Go Authors.  All rights reserved.
+// Copyright 2014 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/windows/svc/go13.go b/vendor/golang.org/x/sys/windows/svc/go13.go
index 432a9e7..9d7f3ce 100644
--- a/vendor/golang.org/x/sys/windows/svc/go13.go
+++ b/vendor/golang.org/x/sys/windows/svc/go13.go
@@ -1,4 +1,4 @@
-// Copyright 2014 The Go Authors.  All rights reserved.
+// Copyright 2014 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go
index 4e2fbe8..b07bc23 100644
--- a/vendor/golang.org/x/sys/windows/syscall.go
+++ b/vendor/golang.org/x/sys/windows/syscall.go
@@ -5,10 +5,10 @@
 // +build windows
 
 // Package windows contains an interface to the low-level operating system
-// primitives.  OS details vary depending on the underlying system, and
+// primitives. OS details vary depending on the underlying system, and
 // by default, godoc will display the OS-specific documentation for the current
-// system.  If you want godoc to display syscall documentation for another
-// system, set $GOOS and $GOARCH to the desired system.  For example, if
+// system. If you want godoc to display syscall documentation for another
+// system, set $GOOS and $GOARCH to the desired system. For example, if
 // you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
 // to freebsd and $GOARCH to arm.
 // The primary use of this package is inside other packages that provide a more
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index c7ff24b..bb778db 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2009 The Go Authors.  All rights reserved.
+// Copyright 2009 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
@@ -71,12 +71,17 @@
 
 func Getpagesize() int { return 4096 }
 
-// Converts a Go function to a function pointer conforming
-// to the stdcall or cdecl calling convention.  This is useful when
-// interoperating with Windows code requiring callbacks.
-// Implemented in runtime/syscall_windows.goc
-func NewCallback(fn interface{}) uintptr
-func NewCallbackCDecl(fn interface{}) uintptr
+// NewCallback converts a Go function to a function pointer conforming to the stdcall calling convention.
+// This is useful when interoperating with Windows code requiring callbacks.
+func NewCallback(fn interface{}) uintptr {
+	return syscall.NewCallback(fn)
+}
+
+// NewCallbackCDecl converts a Go function to a function pointer conforming to the cdecl calling convention.
+// This is useful when interoperating with Windows code requiring callbacks.
+func NewCallbackCDecl(fn interface{}) uintptr {
+	return syscall.NewCallbackCDecl(fn)
+}
 
 // windows api calls
 
@@ -197,6 +202,21 @@
 
 // syscall interface implementation for other packages
 
+// GetProcAddressByOrdinal retrieves the address of the exported
+// function from module by ordinal.
+func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) {
+	r0, _, e1 := syscall.Syscall(procGetProcAddress.Addr(), 2, uintptr(module), ordinal, 0)
+	proc = uintptr(r0)
+	if proc == 0 {
+		if e1 != 0 {
+			err = errnoErr(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
+
 func Exit(code int) { ExitProcess(uint32(code)) }
 
 func makeInheritSa() *SecurityAttributes {
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 401a5f2..0229f79 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors.  All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/windows/types_windows_386.go b/vendor/golang.org/x/sys/windows/types_windows_386.go
index 10f33be..fe0ddd0 100644
--- a/vendor/golang.org/x/sys/windows/types_windows_386.go
+++ b/vendor/golang.org/x/sys/windows/types_windows_386.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors.  All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/vendor/golang.org/x/sys/windows/types_windows_amd64.go b/vendor/golang.org/x/sys/windows/types_windows_amd64.go
index 3f272c2..7e154c2 100644
--- a/vendor/golang.org/x/sys/windows/types_windows_amd64.go
+++ b/vendor/golang.org/x/sys/windows/types_windows_amd64.go
@@ -1,4 +1,4 @@
-// Copyright 2011 The Go Authors.  All rights reserved.
+// Copyright 2011 The Go Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
diff --git a/volume/drivers/extpoint.go b/volume/drivers/extpoint.go
index ee42f2f..c360b37 100644
--- a/volume/drivers/extpoint.go
+++ b/volume/drivers/extpoint.go
@@ -11,6 +11,7 @@
 	getter "github.com/docker/docker/pkg/plugingetter"
 	"github.com/docker/docker/volume"
 	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
 )
 
 // currently created by hand. generation tool would generate this like:
@@ -130,6 +131,12 @@
 
 		d := NewVolumeDriver(p.Name(), p.BasePath(), p.Client())
 		if err := validateDriver(d); err != nil {
+			if mode > 0 {
+				// Undo any reference count changes from the initial `Get`
+				if _, err := drivers.plugingetter.Get(name, extName, mode*-1); err != nil {
+					logrus.WithError(err).WithField("action", "validate-driver").WithField("plugin", name).Error("error releasing reference to plugin")
+				}
+			}
 			return nil, err
 		}
 
@@ -169,9 +176,9 @@
 	return lookup(name, getter.Acquire)
 }
 
-// RemoveDriver returns a volume driver by its name and decrements RefCount..
+// ReleaseDriver returns a volume driver by its name and decrements RefCount..
 // If the driver is empty, it looks for the local driver.
-func RemoveDriver(name string) (volume.Driver, error) {
+func ReleaseDriver(name string) (volume.Driver, error) {
 	if name == "" {
 		name = volume.DefaultDriverName
 	}
diff --git a/volume/lcow_parser.go b/volume/lcow_parser.go
new file mode 100644
index 0000000..aeb81a4
--- /dev/null
+++ b/volume/lcow_parser.go
@@ -0,0 +1,35 @@
+package volume
+
+import (
+	"errors"
+	"fmt"
+	"path"
+
+	"github.com/docker/docker/api/types/mount"
+)
+
+var lcowSpecificValidators mountValidator = func(m *mount.Mount) error {
+	if path.Clean(m.Target) == "/" {
+		return fmt.Errorf("invalid specification: destination can't be '/'")
+	}
+	if m.Type == mount.TypeNamedPipe {
+		return errors.New("Linux containers on Windows do not support named pipe mounts")
+	}
+	return nil
+}
+
+type lcowParser struct {
+	windowsParser
+}
+
+func (p *lcowParser) validateMountConfig(mnt *mount.Mount) error {
+	return p.validateMountConfigReg(mnt, rxLCOWDestination, lcowSpecificValidators)
+}
+
+func (p *lcowParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) {
+	return p.parseMountRaw(raw, volumeDriver, rxLCOWDestination, false, lcowSpecificValidators)
+}
+
+func (p *lcowParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) {
+	return p.parseMountSpec(cfg, rxLCOWDestination, false, lcowSpecificValidators)
+}
diff --git a/volume/linux_parser.go b/volume/linux_parser.go
new file mode 100644
index 0000000..59605fe
--- /dev/null
+++ b/volume/linux_parser.go
@@ -0,0 +1,416 @@
+package volume
+
+import (
+	"errors"
+	"fmt"
+	"path"
+	"path/filepath"
+	"strings"
+
+	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/pkg/stringid"
+)
+
+type linuxParser struct {
+}
+
+func linuxSplitRawSpec(raw string) ([]string, error) {
+	if strings.Count(raw, ":") > 2 {
+		return nil, errInvalidSpec(raw)
+	}
+
+	arr := strings.SplitN(raw, ":", 3)
+	if arr[0] == "" {
+		return nil, errInvalidSpec(raw)
+	}
+	return arr, nil
+}
+
+func linuxValidateNotRoot(p string) error {
+	p = path.Clean(strings.Replace(p, `\`, `/`, -1))
+	if p == "/" {
+		return fmt.Errorf("invalid specification: destination can't be '/'")
+	}
+	return nil
+}
+func linuxValidateAbsolute(p string) error {
+	p = strings.Replace(p, `\`, `/`, -1)
+	if path.IsAbs(p) {
+		return nil
+	}
+	return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p)
+}
+func (p *linuxParser) validateMountConfig(mnt *mount.Mount) error {
+	// NOTE: the pre-refactor code appeared to have an inconsistency:
+	// - on Linux, validateMountConfig skipped the bind-source existence check when called via ParseMountRaw,
+	// - but performed it when ParseMountSpec was called directly (and when the unit tests called it directly).
+	return p.validateMountConfigImpl(mnt, true)
+}
+func (p *linuxParser) validateMountConfigImpl(mnt *mount.Mount, validateBindSourceExists bool) error {
+	if len(mnt.Target) == 0 {
+		return &errMountConfig{mnt, errMissingField("Target")}
+	}
+
+	if err := linuxValidateNotRoot(mnt.Target); err != nil {
+		return &errMountConfig{mnt, err}
+	}
+
+	if err := linuxValidateAbsolute(mnt.Target); err != nil {
+		return &errMountConfig{mnt, err}
+	}
+
+	switch mnt.Type {
+	case mount.TypeBind:
+		if len(mnt.Source) == 0 {
+			return &errMountConfig{mnt, errMissingField("Source")}
+		}
+		// Don't error out just because the propagation mode is not supported on the platform
+		if opts := mnt.BindOptions; opts != nil {
+			if len(opts.Propagation) > 0 && len(linuxPropagationModes) > 0 {
+				if _, ok := linuxPropagationModes[opts.Propagation]; !ok {
+					return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)}
+				}
+			}
+		}
+		if mnt.VolumeOptions != nil {
+			return &errMountConfig{mnt, errExtraField("VolumeOptions")}
+		}
+
+		if err := linuxValidateAbsolute(mnt.Source); err != nil {
+			return &errMountConfig{mnt, err}
+		}
+
+		if validateBindSourceExists {
+			exists, _, _ := currentFileInfoProvider.fileInfo(mnt.Source)
+			if !exists {
+				return &errMountConfig{mnt, errBindNotExist}
+			}
+		}
+
+	case mount.TypeVolume:
+		if mnt.BindOptions != nil {
+			return &errMountConfig{mnt, errExtraField("BindOptions")}
+		}
+
+		if len(mnt.Source) == 0 && mnt.ReadOnly {
+			return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")}
+		}
+	case mount.TypeTmpfs:
+		if len(mnt.Source) != 0 {
+			return &errMountConfig{mnt, errExtraField("Source")}
+		}
+		if _, err := p.ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil {
+			return &errMountConfig{mnt, err}
+		}
+	default:
+		return &errMountConfig{mnt, errors.New("mount type unknown")}
+	}
+	return nil
+}
+
+// read-write modes
+var rwModes = map[string]bool{
+	"rw": true,
+	"ro": true,
+}
+
+// label modes
+var linuxLabelModes = map[string]bool{
+	"Z": true,
+	"z": true,
+}
+
+// consistency modes
+var linuxConsistencyModes = map[mount.Consistency]bool{
+	mount.ConsistencyFull:      true,
+	mount.ConsistencyCached:    true,
+	mount.ConsistencyDelegated: true,
+}
+var linuxPropagationModes = map[mount.Propagation]bool{
+	mount.PropagationPrivate:  true,
+	mount.PropagationRPrivate: true,
+	mount.PropagationSlave:    true,
+	mount.PropagationRSlave:   true,
+	mount.PropagationShared:   true,
+	mount.PropagationRShared:  true,
+}
+
+const linuxDefaultPropagationMode = mount.PropagationRPrivate
+
+func linuxGetPropagation(mode string) mount.Propagation {
+	for _, o := range strings.Split(mode, ",") {
+		prop := mount.Propagation(o)
+		if linuxPropagationModes[prop] {
+			return prop
+		}
+	}
+	return linuxDefaultPropagationMode
+}
+
+func linuxHasPropagation(mode string) bool {
+	for _, o := range strings.Split(mode, ",") {
+		if linuxPropagationModes[mount.Propagation(o)] {
+			return true
+		}
+	}
+	return false
+}
+
+func linuxValidMountMode(mode string) bool {
+	if mode == "" {
+		return true
+	}
+
+	rwModeCount := 0
+	labelModeCount := 0
+	propagationModeCount := 0
+	copyModeCount := 0
+	consistencyModeCount := 0
+
+	for _, o := range strings.Split(mode, ",") {
+		switch {
+		case rwModes[o]:
+			rwModeCount++
+		case linuxLabelModes[o]:
+			labelModeCount++
+		case linuxPropagationModes[mount.Propagation(o)]:
+			propagationModeCount++
+		case copyModeExists(o):
+			copyModeCount++
+		case linuxConsistencyModes[mount.Consistency(o)]:
+			consistencyModeCount++
+		default:
+			return false
+		}
+	}
+
+	// Only one string for each mode is allowed.
+	if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 || consistencyModeCount > 1 {
+		return false
+	}
+	return true
+}
+
+func (p *linuxParser) ReadWrite(mode string) bool {
+	if !linuxValidMountMode(mode) {
+		return false
+	}
+
+	for _, o := range strings.Split(mode, ",") {
+		if o == "ro" {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *linuxParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) {
+	arr, err := linuxSplitRawSpec(raw)
+	if err != nil {
+		return nil, err
+	}
+
+	var spec mount.Mount
+	var mode string
+	switch len(arr) {
+	case 1:
+		// Just a destination path in the container
+		spec.Target = arr[0]
+	case 2:
+		if linuxValidMountMode(arr[1]) {
+			// Destination + Mode is not a valid volume - volumes
+			// cannot include a mode. e.g. /foo:rw
+			return nil, errInvalidSpec(raw)
+		}
+		// Host Source Path or Name + Destination
+		spec.Source = arr[0]
+		spec.Target = arr[1]
+	case 3:
+		// HostSourcePath+DestinationPath+Mode
+		spec.Source = arr[0]
+		spec.Target = arr[1]
+		mode = arr[2]
+	default:
+		return nil, errInvalidSpec(raw)
+	}
+
+	if !linuxValidMountMode(mode) {
+		return nil, errInvalidMode(mode)
+	}
+
+	if path.IsAbs(spec.Source) {
+		spec.Type = mount.TypeBind
+	} else {
+		spec.Type = mount.TypeVolume
+	}
+
+	spec.ReadOnly = !p.ReadWrite(mode)
+
+	// cannot assume that if a volume driver is passed in that we should set it
+	if volumeDriver != "" && spec.Type == mount.TypeVolume {
+		spec.VolumeOptions = &mount.VolumeOptions{
+			DriverConfig: &mount.Driver{Name: volumeDriver},
+		}
+	}
+
+	if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet {
+		if spec.VolumeOptions == nil {
+			spec.VolumeOptions = &mount.VolumeOptions{}
+		}
+		spec.VolumeOptions.NoCopy = !copyData
+	}
+	if linuxHasPropagation(mode) {
+		spec.BindOptions = &mount.BindOptions{
+			Propagation: linuxGetPropagation(mode),
+		}
+	}
+
+	mp, err := p.parseMountSpec(spec, false)
+	if mp != nil {
+		mp.Mode = mode
+	}
+	if err != nil {
+		err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err)
+	}
+	return mp, err
+}
+func (p *linuxParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) {
+	return p.parseMountSpec(cfg, true)
+}
+func (p *linuxParser) parseMountSpec(cfg mount.Mount, validateBindSourceExists bool) (*MountPoint, error) {
+	if err := p.validateMountConfigImpl(&cfg, validateBindSourceExists); err != nil {
+		return nil, err
+	}
+	mp := &MountPoint{
+		RW:          !cfg.ReadOnly,
+		Destination: path.Clean(filepath.ToSlash(cfg.Target)),
+		Type:        cfg.Type,
+		Spec:        cfg,
+	}
+
+	switch cfg.Type {
+	case mount.TypeVolume:
+		if cfg.Source == "" {
+			mp.Name = stringid.GenerateNonCryptoID()
+		} else {
+			mp.Name = cfg.Source
+		}
+		mp.CopyData = p.DefaultCopyMode()
+
+		if cfg.VolumeOptions != nil {
+			if cfg.VolumeOptions.DriverConfig != nil {
+				mp.Driver = cfg.VolumeOptions.DriverConfig.Name
+			}
+			if cfg.VolumeOptions.NoCopy {
+				mp.CopyData = false
+			}
+		}
+	case mount.TypeBind:
+		mp.Source = path.Clean(filepath.ToSlash(cfg.Source))
+		if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 {
+			mp.Propagation = cfg.BindOptions.Propagation
+		} else {
+			// If user did not specify a propagation mode, get
+			// default propagation mode.
+			mp.Propagation = linuxDefaultPropagationMode
+		}
+	case mount.TypeTmpfs:
+		// NOP
+	}
+	return mp, nil
+}
+
+func (p *linuxParser) ParseVolumesFrom(spec string) (string, string, error) {
+	if len(spec) == 0 {
+		return "", "", fmt.Errorf("volumes-from specification cannot be an empty string")
+	}
+
+	specParts := strings.SplitN(spec, ":", 2)
+	id := specParts[0]
+	mode := "rw"
+
+	if len(specParts) == 2 {
+		mode = specParts[1]
+		if !linuxValidMountMode(mode) {
+			return "", "", errInvalidMode(mode)
+		}
+		// For now don't allow propagation properties while importing
+		// volumes from data container. These volumes will inherit
+		// the same propagation property as of the original volume
+		// in data container. This probably can be relaxed in future.
+		if linuxHasPropagation(mode) {
+			return "", "", errInvalidMode(mode)
+		}
+		// Do not allow copy modes on volumes-from
+		if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet {
+			return "", "", errInvalidMode(mode)
+		}
+	}
+	return id, mode, nil
+}
+
+func (p *linuxParser) DefaultPropagationMode() mount.Propagation {
+	return linuxDefaultPropagationMode
+}
+
+func (p *linuxParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) {
+	var rawOpts []string
+	if readOnly {
+		rawOpts = append(rawOpts, "ro")
+	}
+
+	if opt != nil && opt.Mode != 0 {
+		rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode))
+	}
+
+	if opt != nil && opt.SizeBytes != 0 {
+		// calculate suffix here, making this linux specific, but that is
+		// okay, since the API is that way anyway.
+
+		// we do this by finding the suffix that divides evenly into the
+		// value, returning the value itself, with no suffix, if it fails.
+		//
+		// For the most part, we don't enforce any semantics on these values.
+		// The operating system will usually align this and enforce minimum
+		// and maximums.
+		var (
+			size   = opt.SizeBytes
+			suffix string
+		)
+		for _, r := range []struct {
+			suffix  string
+			divisor int64
+		}{
+			{"g", 1 << 30},
+			{"m", 1 << 20},
+			{"k", 1 << 10},
+		} {
+			if size%r.divisor == 0 {
+				size = size / r.divisor
+				suffix = r.suffix
+				break
+			}
+		}
+
+		rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix))
+	}
+	return strings.Join(rawOpts, ","), nil
+}
+
+func (p *linuxParser) DefaultCopyMode() bool {
+	return true
+}
+func (p *linuxParser) ValidateVolumeName(name string) error {
+	return nil
+}
+
+func (p *linuxParser) IsBackwardCompatible(m *MountPoint) bool {
+	return len(m.Source) > 0 || m.Driver == DefaultDriverName
+}
+
+func (p *linuxParser) ValidateTmpfsMountDestination(dest string) error {
+	if err := linuxValidateNotRoot(dest); err != nil {
+		return err
+	}
+	return linuxValidateAbsolute(dest)
+}
diff --git a/volume/local/local.go b/volume/local/local.go
index eb78d87..b37c45e 100644
--- a/volume/local/local.go
+++ b/volume/local/local.go
@@ -13,7 +13,7 @@
 	"strings"
 	"sync"
 
-	"github.com/docker/docker/api"
+	"github.com/docker/docker/daemon/names"
 	"github.com/docker/docker/pkg/idtools"
 	"github.com/docker/docker/pkg/mount"
 	"github.com/docker/docker/volume"
@@ -35,7 +35,7 @@
 	// volumeNameRegex ensures the name assigned for the volume is valid.
 	// This name is used to create the bind directory, so we need to avoid characters that
 	// would make the path to escape the root directory.
-	volumeNameRegex = api.RestrictedNamePattern
+	volumeNameRegex = names.RestrictedNamePattern
 )
 
 type activeMount struct {
@@ -298,7 +298,7 @@
 		return validationError("volume name is too short, names should be at least two alphanumeric characters")
 	}
 	if !volumeNameRegex.MatchString(name) {
-		return validationError(fmt.Sprintf("%q includes invalid characters for a local volume name, only %q are allowed. If you intended to pass a host directory, use absolute path", name, api.RestrictedNameChars))
+		return validationError(fmt.Sprintf("%q includes invalid characters for a local volume name, only %q are allowed. If you intended to pass a host directory, use absolute path", name, names.RestrictedNameChars))
 	}
 	return nil
 }
@@ -334,6 +334,11 @@
 	return v.path
 }
 
+// CachedPath returns the data location
+func (v *localVolume) CachedPath() string {
+	return v.path
+}
+
 // Mount implements the localVolume interface, returning the data location.
 // If there are any provided mount options, the resources will be mounted at this point
 func (v *localVolume) Mount(id string) (string, error) {
diff --git a/volume/local/local_test.go b/volume/local/local_test.go
index 2353391..2d95919 100644
--- a/volume/local/local_test.go
+++ b/volume/local/local_test.go
@@ -181,7 +181,7 @@
 }
 
 func TestCreateWithOpts(t *testing.T) {
-	if runtime.GOOS == "windows" || runtime.GOOS == "solaris" {
+	if runtime.GOOS == "windows" {
 		t.Skip()
 	}
 	rootDir, err := ioutil.TempDir("", "local-volume-test")
diff --git a/volume/local/local_unix.go b/volume/local/local_unix.go
index 5bba5b7..6226955 100644
--- a/volume/local/local_unix.go
+++ b/volume/local/local_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris
+// +build linux freebsd
 
 // Package local provides the default implementation for volumes. It
 // is used to mount data volume containers and directories local to
diff --git a/volume/parser.go b/volume/parser.go
new file mode 100644
index 0000000..1f48b60
--- /dev/null
+++ b/volume/parser.go
@@ -0,0 +1,43 @@
+package volume
+
+import (
+	"runtime"
+
+	"github.com/docker/docker/api/types/mount"
+)
+
+const (
+	// OSLinux is the same as runtime.GOOS on linux
+	OSLinux = "linux"
+	// OSWindows is the same as runtime.GOOS on windows
+	OSWindows = "windows"
+)
+
+// Parser represents a platform specific parser for mount expressions
+type Parser interface {
+	ParseMountRaw(raw, volumeDriver string) (*MountPoint, error)
+	ParseMountSpec(cfg mount.Mount) (*MountPoint, error)
+	ParseVolumesFrom(spec string) (string, string, error)
+	DefaultPropagationMode() mount.Propagation
+	ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error)
+	DefaultCopyMode() bool
+	ValidateVolumeName(name string) error
+	ReadWrite(mode string) bool
+	IsBackwardCompatible(m *MountPoint) bool
+	HasResource(m *MountPoint, absPath string) bool
+	ValidateTmpfsMountDestination(dest string) error
+
+	validateMountConfig(mt *mount.Mount) error
+}
+
+// NewParser creates a parser for a given container OS, depending on the current host OS (linux on a windows host will resolve to an lcowParser)
+func NewParser(containerOS string) Parser {
+	switch containerOS {
+	case OSWindows:
+		return &windowsParser{}
+	}
+	if runtime.GOOS == OSWindows {
+		return &lcowParser{}
+	}
+	return &linuxParser{}
+}
diff --git a/volume/store/errors.go b/volume/store/errors.go
index 13c7765..75e24e6 100644
--- a/volume/store/errors.go
+++ b/volume/store/errors.go
@@ -9,8 +9,6 @@
 	errVolumeInUse conflictError = "volume is in use"
 	// errNoSuchVolume is a typed error returned if the requested volume doesn't exist in the volume store
 	errNoSuchVolume notFoundError = "no such volume"
-	// errInvalidName is a typed error returned when creating a volume with a name that is not valid on the platform
-	errInvalidName invalidName = "volume name is not valid on this platform"
 	// errNameConflict is a typed error returned on create when a volume exists with the given name, but for a different driver
 	errNameConflict conflictError = "volume name must be unique"
 )
@@ -30,13 +28,6 @@
 
 func (notFoundError) NotFound() {}
 
-type invalidName string
-
-func (e invalidName) Error() string {
-	return string(e)
-}
-func (invalidName) InvalidParameter() {}
-
 // OpErr is the error type returned by functions in the store package. It describes
 // the operation, volume name, and error.
 type OpErr struct {
diff --git a/volume/store/store.go b/volume/store/store.go
index bc44761..5402c6b 100644
--- a/volume/store/store.go
+++ b/volume/store/store.go
@@ -4,6 +4,7 @@
 	"net"
 	"os"
 	"path/filepath"
+	"runtime"
 	"sync"
 	"time"
 
@@ -144,7 +145,7 @@
 	s.globalLock.Lock()
 	v, exists := s.names[name]
 	if exists {
-		if _, err := volumedrivers.RemoveDriver(v.DriverName()); err != nil {
+		if _, err := volumedrivers.ReleaseDriver(v.DriverName()); err != nil {
 			logrus.Errorf("Error dereferencing volume driver: %v", err)
 		}
 	}
@@ -369,13 +370,14 @@
 // It is expected that callers of this function hold any necessary locks.
 func (s *VolumeStore) create(name, driverName string, opts, labels map[string]string) (volume.Volume, error) {
 	// Validate the name in a platform-specific manner
-	valid, err := volume.IsVolumeNameValid(name)
+
+	// volume name validation is specific to the host OS, not to the container image;
+	// windows/lcow should have equivalent volume name validation logic, so we create a parser for the current host OS
+	parser := volume.NewParser(runtime.GOOS)
+	err := parser.ValidateVolumeName(name)
 	if err != nil {
 		return nil, err
 	}
-	if !valid {
-		return nil, &OpErr{Err: errInvalidName, Name: name, Op: "create"}
-	}
 
 	v, err := s.checkConflict(name, driverName)
 	if err != nil {
@@ -407,6 +409,9 @@
 	}
 	v, err = vd.Create(name, opts)
 	if err != nil {
+		if _, err := volumedrivers.ReleaseDriver(driverName); err != nil {
+			logrus.WithError(err).WithField("driver", driverName).Error("Error releasing reference to volume driver")
+		}
 		return nil, err
 	}
 	s.globalLock.Lock()
diff --git a/volume/store/store_unix.go b/volume/store/store_unix.go
index c024abb..065cb28 100644
--- a/volume/store/store_unix.go
+++ b/volume/store/store_unix.go
@@ -1,4 +1,4 @@
-// +build linux freebsd solaris
+// +build linux freebsd
 
 package store
 
diff --git a/volume/validate.go b/volume/validate.go
index 04883f3..b3f6409 100644
--- a/volume/validate.go
+++ b/volume/validate.go
@@ -2,8 +2,6 @@
 
 import (
 	"fmt"
-	"os"
-	"runtime"
 
 	"github.com/docker/docker/api/types/mount"
 	"github.com/pkg/errors"
@@ -11,120 +9,6 @@
 
 var errBindNotExist = errors.New("bind source path does not exist")
 
-type validateOpts struct {
-	skipBindSourceCheck bool
-}
-
-func validateMountConfig(mnt *mount.Mount, options ...func(*validateOpts)) error {
-	opts := validateOpts{}
-	for _, o := range options {
-		o(&opts)
-	}
-
-	if len(mnt.Target) == 0 {
-		return &errMountConfig{mnt, errMissingField("Target")}
-	}
-
-	if err := validateNotRoot(mnt.Target); err != nil {
-		return &errMountConfig{mnt, err}
-	}
-
-	if err := validateAbsolute(mnt.Target); err != nil {
-		return &errMountConfig{mnt, err}
-	}
-
-	switch mnt.Type {
-	case mount.TypeBind:
-		if len(mnt.Source) == 0 {
-			return &errMountConfig{mnt, errMissingField("Source")}
-		}
-		// Don't error out just because the propagation mode is not supported on the platform
-		if opts := mnt.BindOptions; opts != nil {
-			if len(opts.Propagation) > 0 && len(propagationModes) > 0 {
-				if _, ok := propagationModes[opts.Propagation]; !ok {
-					return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)}
-				}
-			}
-		}
-		if mnt.VolumeOptions != nil {
-			return &errMountConfig{mnt, errExtraField("VolumeOptions")}
-		}
-
-		if err := validateAbsolute(mnt.Source); err != nil {
-			return &errMountConfig{mnt, err}
-		}
-
-		// Do not allow binding to non-existent path
-		if !opts.skipBindSourceCheck {
-			fi, err := os.Stat(mnt.Source)
-			if err != nil {
-				if !os.IsNotExist(err) {
-					return &errMountConfig{mnt, err}
-				}
-				return &errMountConfig{mnt, errBindNotExist}
-			}
-			if err := validateStat(fi); err != nil {
-				return &errMountConfig{mnt, err}
-			}
-		}
-	case mount.TypeVolume:
-		if mnt.BindOptions != nil {
-			return &errMountConfig{mnt, errExtraField("BindOptions")}
-		}
-
-		if len(mnt.Source) == 0 && mnt.ReadOnly {
-			return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")}
-		}
-
-		if len(mnt.Source) != 0 {
-			if valid, err := IsVolumeNameValid(mnt.Source); !valid {
-				if err == nil {
-					err = errors.New("invalid volume name")
-				}
-				return &errMountConfig{mnt, err}
-			}
-		}
-	case mount.TypeTmpfs:
-		if len(mnt.Source) != 0 {
-			return &errMountConfig{mnt, errExtraField("Source")}
-		}
-		if err := ValidateTmpfsMountDestination(mnt.Target); err != nil {
-			return &errMountConfig{mnt, err}
-		}
-		if _, err := ConvertTmpfsOptions(mnt.TmpfsOptions, mnt.ReadOnly); err != nil {
-			return &errMountConfig{mnt, err}
-		}
-	case mount.TypeNamedPipe:
-		if runtime.GOOS != "windows" {
-			return &errMountConfig{mnt, errors.New("named pipe bind mounts are not supported on this OS")}
-		}
-
-		if len(mnt.Source) == 0 {
-			return &errMountConfig{mnt, errMissingField("Source")}
-		}
-
-		if mnt.BindOptions != nil {
-			return &errMountConfig{mnt, errExtraField("BindOptions")}
-		}
-
-		if mnt.ReadOnly {
-			return &errMountConfig{mnt, errExtraField("ReadOnly")}
-		}
-
-		if detectMountType(mnt.Source) != mount.TypeNamedPipe {
-			return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Source)}
-		}
-
-		if detectMountType(mnt.Target) != mount.TypeNamedPipe {
-			return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Target)}
-		}
-
-	default:
-		return &errMountConfig{mnt, errors.New("mount type unknown")}
-	}
-	return nil
-}
-
 type errMountConfig struct {
 	mount *mount.Mount
 	err   error
@@ -140,37 +24,3 @@
 func errMissingField(name string) error {
 	return errors.Errorf("field %s must not be empty", name)
 }
-
-func validateAbsolute(p string) error {
-	p = convertSlash(p)
-	if isAbsPath(p) {
-		return nil
-	}
-	return errors.Errorf("invalid mount path: '%s' mount path must be absolute", p)
-}
-
-// ValidateTmpfsMountDestination validates the destination of tmpfs mount.
-// Currently, we have only two obvious rule for validation:
-//  - path must not be "/"
-//  - path must be absolute
-// We should add more rules carefully (#30166)
-func ValidateTmpfsMountDestination(dest string) error {
-	if err := validateNotRoot(dest); err != nil {
-		return err
-	}
-	return validateAbsolute(dest)
-}
-
-type validationError struct {
-	err error
-}
-
-func (e validationError) Error() string {
-	return e.err.Error()
-}
-
-func (e validationError) InvalidParameter() {}
-
-func (e validationError) Cause() error {
-	return e.err
-}
diff --git a/volume/validate_test.go b/volume/validate_test.go
index 8732500..6a8e286 100644
--- a/volume/validate_test.go
+++ b/volume/validate_test.go
@@ -4,6 +4,7 @@
 	"errors"
 	"io/ioutil"
 	"os"
+	"runtime"
 	"strings"
 	"testing"
 
@@ -27,17 +28,50 @@
 		{mount.Mount{Type: mount.TypeBind}, errMissingField("Target")},
 		{mount.Mount{Type: mount.TypeBind, Target: testDestinationPath}, errMissingField("Source")},
 		{mount.Mount{Type: mount.TypeBind, Target: testDestinationPath, Source: testSourcePath, VolumeOptions: &mount.VolumeOptions{}}, errExtraField("VolumeOptions")},
-		{mount.Mount{Type: mount.TypeBind, Source: testSourcePath, Target: testDestinationPath}, errBindNotExist},
+
 		{mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, nil},
 		{mount.Mount{Type: "invalid", Target: testDestinationPath}, errors.New("mount type unknown")},
 	}
+	if runtime.GOOS == "windows" {
+		cases = append(cases, struct {
+			input    mount.Mount
+			expected error
+		}{mount.Mount{Type: mount.TypeBind, Source: testSourcePath, Target: testDestinationPath}, errBindNotExist}) // bind source existence is not checked on linux
+	}
+	lcowCases := []struct {
+		input    mount.Mount
+		expected error
+	}{
+		{mount.Mount{Type: mount.TypeVolume}, errMissingField("Target")},
+		{mount.Mount{Type: mount.TypeVolume, Target: "/foo", Source: "hello"}, nil},
+		{mount.Mount{Type: mount.TypeVolume, Target: "/foo"}, nil},
+		{mount.Mount{Type: mount.TypeBind}, errMissingField("Target")},
+		{mount.Mount{Type: mount.TypeBind, Target: "/foo"}, errMissingField("Source")},
+		{mount.Mount{Type: mount.TypeBind, Target: "/foo", Source: "c:\\foo", VolumeOptions: &mount.VolumeOptions{}}, errExtraField("VolumeOptions")},
+		{mount.Mount{Type: mount.TypeBind, Source: "c:\\foo", Target: "/foo"}, errBindNotExist},
+		{mount.Mount{Type: mount.TypeBind, Source: testDir, Target: "/foo"}, nil},
+		{mount.Mount{Type: "invalid", Target: "/foo"}, errors.New("mount type unknown")},
+	}
+	parser := NewParser(runtime.GOOS)
 	for i, x := range cases {
-		err := validateMountConfig(&x.input)
+		err := parser.validateMountConfig(&x.input)
 		if err == nil && x.expected == nil {
 			continue
 		}
 		if (err == nil && x.expected != nil) || (x.expected == nil && err != nil) || !strings.Contains(err.Error(), x.expected.Error()) {
-			t.Fatalf("expected %q, got %q, case: %d", x.expected, err, i)
+			t.Errorf("expected %q, got %q, case: %d", x.expected, err, i)
+		}
+	}
+	if runtime.GOOS == "windows" {
+		parser = &lcowParser{}
+		for i, x := range lcowCases {
+			err := parser.validateMountConfig(&x.input)
+			if err == nil && x.expected == nil {
+				continue
+			}
+			if (err == nil && x.expected != nil) || (x.expected == nil && err != nil) || !strings.Contains(err.Error(), x.expected.Error()) {
+				t.Errorf("expected %q, got %q, case: %d", x.expected, err, i)
+			}
 		}
 	}
 }
diff --git a/volume/volume.go b/volume/volume.go
index 7f962a9..b8ec1e5 100644
--- a/volume/volume.go
+++ b/volume/volume.go
@@ -3,7 +3,7 @@
 import (
 	"fmt"
 	"os"
-	"strings"
+	"path/filepath"
 	"syscall"
 	"time"
 
@@ -156,13 +156,20 @@
 			return
 		}
 
-		err = label.Relabel(m.Source, mountLabel, label.IsShared(m.Mode))
+		var sourcePath string
+		sourcePath, err = filepath.EvalSymlinks(m.Source)
+		if err != nil {
+			path = ""
+			err = errors.Wrapf(err, "error evaluating symlinks from mount source %q", m.Source)
+			return
+		}
+		err = label.Relabel(sourcePath, mountLabel, label.IsShared(m.Mode))
 		if err == syscall.ENOTSUP {
 			err = nil
 		}
 		if err != nil {
 			path = ""
-			err = errors.Wrapf(err, "error setting label on mount source '%s'", m.Source)
+			err = errors.Wrapf(err, "error setting label on mount source '%s'", sourcePath)
 		}
 	}()
 
@@ -216,153 +223,10 @@
 	return m.Source
 }
 
-// ParseVolumesFrom ensures that the supplied volumes-from is valid.
-func ParseVolumesFrom(spec string) (string, string, error) {
-	if len(spec) == 0 {
-		return "", "", fmt.Errorf("volumes-from specification cannot be an empty string")
-	}
-
-	specParts := strings.SplitN(spec, ":", 2)
-	id := specParts[0]
-	mode := "rw"
-
-	if len(specParts) == 2 {
-		mode = specParts[1]
-		if !ValidMountMode(mode) {
-			return "", "", errInvalidMode(mode)
-		}
-		// For now don't allow propagation properties while importing
-		// volumes from data container. These volumes will inherit
-		// the same propagation property as of the original volume
-		// in data container. This probably can be relaxed in future.
-		if HasPropagation(mode) {
-			return "", "", errInvalidMode(mode)
-		}
-		// Do not allow copy modes on volumes-from
-		if _, isSet := getCopyMode(mode); isSet {
-			return "", "", errInvalidMode(mode)
-		}
-	}
-	return id, mode, nil
-}
-
-// ParseMountRaw parses a raw volume spec (e.g. `-v /foo:/bar:shared`) into a
-// structured spec. Once the raw spec is parsed it relies on `ParseMountSpec` to
-// validate the spec and create a MountPoint
-func ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) {
-	arr, err := splitRawSpec(convertSlash(raw))
-	if err != nil {
-		return nil, err
-	}
-
-	var spec mounttypes.Mount
-	var mode string
-	switch len(arr) {
-	case 1:
-		// Just a destination path in the container
-		spec.Target = arr[0]
-	case 2:
-		if ValidMountMode(arr[1]) {
-			// Destination + Mode is not a valid volume - volumes
-			// cannot include a mode. e.g. /foo:rw
-			return nil, errInvalidSpec(raw)
-		}
-		// Host Source Path or Name + Destination
-		spec.Source = arr[0]
-		spec.Target = arr[1]
-	case 3:
-		// HostSourcePath+DestinationPath+Mode
-		spec.Source = arr[0]
-		spec.Target = arr[1]
-		mode = arr[2]
-	default:
-		return nil, errInvalidSpec(raw)
-	}
-
-	if !ValidMountMode(mode) {
-		return nil, errInvalidMode(mode)
-	}
-
-	spec.Type = detectMountType(spec.Source)
-	spec.ReadOnly = !ReadWrite(mode)
-
-	// cannot assume that if a volume driver is passed in that we should set it
-	if volumeDriver != "" && spec.Type == mounttypes.TypeVolume {
-		spec.VolumeOptions = &mounttypes.VolumeOptions{
-			DriverConfig: &mounttypes.Driver{Name: volumeDriver},
-		}
-	}
-
-	if copyData, isSet := getCopyMode(mode); isSet {
-		if spec.VolumeOptions == nil {
-			spec.VolumeOptions = &mounttypes.VolumeOptions{}
-		}
-		spec.VolumeOptions.NoCopy = !copyData
-	}
-	if HasPropagation(mode) {
-		spec.BindOptions = &mounttypes.BindOptions{
-			Propagation: GetPropagation(mode),
-		}
-	}
-
-	mp, err := ParseMountSpec(spec, platformRawValidationOpts...)
-	if mp != nil {
-		mp.Mode = mode
-	}
-	if err != nil {
-		err = errors.Wrap(err, errInvalidSpec(raw).Error())
-	}
-	return mp, err
-}
-
-// ParseMountSpec reads a mount config, validates it, and configures a mountpoint from it.
-func ParseMountSpec(cfg mounttypes.Mount, options ...func(*validateOpts)) (*MountPoint, error) {
-	if err := validateMountConfig(&cfg, options...); err != nil {
-		return nil, validationError{err}
-	}
-	mp := &MountPoint{
-		RW:          !cfg.ReadOnly,
-		Destination: clean(convertSlash(cfg.Target)),
-		Type:        cfg.Type,
-		Spec:        cfg,
-	}
-
-	switch cfg.Type {
-	case mounttypes.TypeVolume:
-		if cfg.Source == "" {
-			mp.Name = stringid.GenerateNonCryptoID()
-		} else {
-			mp.Name = cfg.Source
-		}
-		mp.CopyData = DefaultCopyMode
-
-		if cfg.VolumeOptions != nil {
-			if cfg.VolumeOptions.DriverConfig != nil {
-				mp.Driver = cfg.VolumeOptions.DriverConfig.Name
-			}
-			if cfg.VolumeOptions.NoCopy {
-				mp.CopyData = false
-			}
-		}
-	case mounttypes.TypeBind, mounttypes.TypeNamedPipe:
-		mp.Source = clean(convertSlash(cfg.Source))
-		if cfg.BindOptions != nil && len(cfg.BindOptions.Propagation) > 0 {
-			mp.Propagation = cfg.BindOptions.Propagation
-		} else {
-			// If user did not specify a propagation mode, get
-			// default propagation mode.
-			mp.Propagation = DefaultPropagationMode
-		}
-	case mounttypes.TypeTmpfs:
-		// NOP
-	}
-	return mp, nil
-}
-
 func errInvalidMode(mode string) error {
-	return validationError{errors.Errorf("invalid mode: %v", mode)}
+	return errors.Errorf("invalid mode: %v", mode)
 }
 
 func errInvalidSpec(spec string) error {
-	return validationError{errors.Errorf("invalid volume specification: '%s'", spec)}
+	return errors.Errorf("invalid volume specification: '%s'", spec)
 }
diff --git a/volume/volume_copy.go b/volume/volume_copy.go
index 77f06a0..37c7fa7 100644
--- a/volume/volume_copy.go
+++ b/volume/volume_copy.go
@@ -13,11 +13,11 @@
 }
 
 // GetCopyMode gets the copy mode from the mode string for mounts
-func getCopyMode(mode string) (bool, bool) {
+func getCopyMode(mode string, def bool) (bool, bool) {
 	for _, o := range strings.Split(mode, ",") {
 		if isEnabled, exists := copyModes[o]; exists {
 			return isEnabled, true
 		}
 	}
-	return DefaultCopyMode, false
+	return def, false
 }
diff --git a/volume/volume_copy_unix.go b/volume/volume_copy_unix.go
deleted file mode 100644
index ad66e17..0000000
--- a/volume/volume_copy_unix.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !windows
-
-package volume
-
-const (
-	// DefaultCopyMode is the copy mode used by default for normal/named volumes
-	DefaultCopyMode = true
-)
diff --git a/volume/volume_copy_windows.go b/volume/volume_copy_windows.go
deleted file mode 100644
index 798638c..0000000
--- a/volume/volume_copy_windows.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package volume
-
-const (
-	// DefaultCopyMode is the copy mode used by default for normal/named volumes
-	DefaultCopyMode = false
-)
diff --git a/volume/volume_linux.go b/volume/volume_linux.go
deleted file mode 100644
index fdf7b63..0000000
--- a/volume/volume_linux.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// +build linux
-
-package volume
-
-import (
-	"fmt"
-	"strings"
-
-	mounttypes "github.com/docker/docker/api/types/mount"
-)
-
-// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string
-// for mount(2).
-func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) {
-	var rawOpts []string
-	if readOnly {
-		rawOpts = append(rawOpts, "ro")
-	}
-
-	if opt != nil && opt.Mode != 0 {
-		rawOpts = append(rawOpts, fmt.Sprintf("mode=%o", opt.Mode))
-	}
-
-	if opt != nil && opt.SizeBytes != 0 {
-		// calculate suffix here, making this linux specific, but that is
-		// okay, since API is that way anyways.
-
-		// we do this by finding the suffix that divides evenly into the
-		// value, returning the value itself, with no suffix, if it fails.
-		//
-		// For the most part, we don't enforce any semantic to this values.
-		// The operating system will usually align this and enforce minimum
-		// and maximums.
-		var (
-			size   = opt.SizeBytes
-			suffix string
-		)
-		for _, r := range []struct {
-			suffix  string
-			divisor int64
-		}{
-			{"g", 1 << 30},
-			{"m", 1 << 20},
-			{"k", 1 << 10},
-		} {
-			if size%r.divisor == 0 {
-				size = size / r.divisor
-				suffix = r.suffix
-				break
-			}
-		}
-
-		rawOpts = append(rawOpts, fmt.Sprintf("size=%d%s", size, suffix))
-	}
-	return strings.Join(rawOpts, ","), nil
-}
diff --git a/volume/volume_linux_test.go b/volume/volume_linux_test.go
deleted file mode 100644
index 40ce552..0000000
--- a/volume/volume_linux_test.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build linux
-
-package volume
-
-import (
-	"strings"
-	"testing"
-
-	mounttypes "github.com/docker/docker/api/types/mount"
-)
-
-func TestConvertTmpfsOptions(t *testing.T) {
-	type testCase struct {
-		opt                  mounttypes.TmpfsOptions
-		readOnly             bool
-		expectedSubstrings   []string
-		unexpectedSubstrings []string
-	}
-	cases := []testCase{
-		{
-			opt:                  mounttypes.TmpfsOptions{SizeBytes: 1024 * 1024, Mode: 0700},
-			readOnly:             false,
-			expectedSubstrings:   []string{"size=1m", "mode=700"},
-			unexpectedSubstrings: []string{"ro"},
-		},
-		{
-			opt:                  mounttypes.TmpfsOptions{},
-			readOnly:             true,
-			expectedSubstrings:   []string{"ro"},
-			unexpectedSubstrings: []string{},
-		},
-	}
-	for _, c := range cases {
-		data, err := ConvertTmpfsOptions(&c.opt, c.readOnly)
-		if err != nil {
-			t.Fatalf("could not convert %+v (readOnly: %v) to string: %v",
-				c.opt, c.readOnly, err)
-		}
-		t.Logf("data=%q", data)
-		for _, s := range c.expectedSubstrings {
-			if !strings.Contains(data, s) {
-				t.Fatalf("expected substring: %s, got %v (case=%+v)", s, data, c)
-			}
-		}
-		for _, s := range c.unexpectedSubstrings {
-			if strings.Contains(data, s) {
-				t.Fatalf("unexpected substring: %s, got %v (case=%+v)", s, data, c)
-			}
-		}
-	}
-}
diff --git a/volume/volume_propagation_linux.go b/volume/volume_propagation_linux.go
deleted file mode 100644
index 1de57ab..0000000
--- a/volume/volume_propagation_linux.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// +build linux
-
-package volume
-
-import (
-	"strings"
-
-	mounttypes "github.com/docker/docker/api/types/mount"
-)
-
-// DefaultPropagationMode defines what propagation mode should be used by
-// default if user has not specified one explicitly.
-// propagation modes
-const DefaultPropagationMode = mounttypes.PropagationRPrivate
-
-var propagationModes = map[mounttypes.Propagation]bool{
-	mounttypes.PropagationPrivate:  true,
-	mounttypes.PropagationRPrivate: true,
-	mounttypes.PropagationSlave:    true,
-	mounttypes.PropagationRSlave:   true,
-	mounttypes.PropagationShared:   true,
-	mounttypes.PropagationRShared:  true,
-}
-
-// GetPropagation extracts and returns the mount propagation mode. If there
-// are no specifications, then by default it is "private".
-func GetPropagation(mode string) mounttypes.Propagation {
-	for _, o := range strings.Split(mode, ",") {
-		prop := mounttypes.Propagation(o)
-		if propagationModes[prop] {
-			return prop
-		}
-	}
-	return DefaultPropagationMode
-}
-
-// HasPropagation checks if there is a valid propagation mode present in
-// passed string. Returns true if a valid propagation mode specifier is
-// present, false otherwise.
-func HasPropagation(mode string) bool {
-	for _, o := range strings.Split(mode, ",") {
-		if propagationModes[mounttypes.Propagation(o)] {
-			return true
-		}
-	}
-	return false
-}
diff --git a/volume/volume_propagation_linux_test.go b/volume/volume_propagation_linux_test.go
deleted file mode 100644
index 46d0265..0000000
--- a/volume/volume_propagation_linux_test.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// +build linux
-
-package volume
-
-import (
-	"strings"
-	"testing"
-)
-
-func TestParseMountRawPropagation(t *testing.T) {
-	var (
-		valid   []string
-		invalid map[string]string
-	)
-
-	valid = []string{
-		"/hostPath:/containerPath:shared",
-		"/hostPath:/containerPath:rshared",
-		"/hostPath:/containerPath:slave",
-		"/hostPath:/containerPath:rslave",
-		"/hostPath:/containerPath:private",
-		"/hostPath:/containerPath:rprivate",
-		"/hostPath:/containerPath:ro,shared",
-		"/hostPath:/containerPath:ro,slave",
-		"/hostPath:/containerPath:ro,private",
-		"/hostPath:/containerPath:ro,z,shared",
-		"/hostPath:/containerPath:ro,Z,slave",
-		"/hostPath:/containerPath:Z,ro,slave",
-		"/hostPath:/containerPath:slave,Z,ro",
-		"/hostPath:/containerPath:Z,slave,ro",
-		"/hostPath:/containerPath:slave,ro,Z",
-		"/hostPath:/containerPath:rslave,ro,Z",
-		"/hostPath:/containerPath:ro,rshared,Z",
-		"/hostPath:/containerPath:ro,Z,rprivate",
-	}
-	invalid = map[string]string{
-		"/path:/path:ro,rshared,rslave":   `invalid mode`,
-		"/path:/path:ro,z,rshared,rslave": `invalid mode`,
-		"/path:shared":                    "invalid volume specification",
-		"/path:slave":                     "invalid volume specification",
-		"/path:private":                   "invalid volume specification",
-		"name:/absolute-path:shared":      "invalid volume specification",
-		"name:/absolute-path:rshared":     "invalid volume specification",
-		"name:/absolute-path:slave":       "invalid volume specification",
-		"name:/absolute-path:rslave":      "invalid volume specification",
-		"name:/absolute-path:private":     "invalid volume specification",
-		"name:/absolute-path:rprivate":    "invalid volume specification",
-	}
-
-	for _, path := range valid {
-		if _, err := ParseMountRaw(path, "local"); err != nil {
-			t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err)
-		}
-	}
-
-	for path, expectedError := range invalid {
-		if _, err := ParseMountRaw(path, "local"); err == nil {
-			t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err %v", path, err)
-		} else {
-			if !strings.Contains(err.Error(), expectedError) {
-				t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error())
-			}
-		}
-	}
-}
diff --git a/volume/volume_propagation_unsupported.go b/volume/volume_propagation_unsupported.go
deleted file mode 100644
index 7311ffc..0000000
--- a/volume/volume_propagation_unsupported.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// +build !linux
-
-package volume
-
-import mounttypes "github.com/docker/docker/api/types/mount"
-
-// DefaultPropagationMode is used only in linux. In other cases it returns
-// empty string.
-const DefaultPropagationMode mounttypes.Propagation = ""
-
-// propagation modes not supported on this platform.
-var propagationModes = map[mounttypes.Propagation]bool{}
-
-// GetPropagation is not supported. Return empty string.
-func GetPropagation(mode string) mounttypes.Propagation {
-	return DefaultPropagationMode
-}
-
-// HasPropagation checks if there is a valid propagation mode present in
-// passed string. Returns true if a valid propagation mode specifier is
-// present, false otherwise.
-func HasPropagation(mode string) bool {
-	return false
-}
diff --git a/volume/volume_test.go b/volume/volume_test.go
index 395f374..9f2020d 100644
--- a/volume/volume_test.go
+++ b/volume/volume_test.go
@@ -10,14 +10,84 @@
 	"github.com/docker/docker/api/types/mount"
 )
 
-func TestParseMountRaw(t *testing.T) {
-	var (
-		valid   []string
-		invalid map[string]string
-	)
+type parseMountRawTestSet struct {
+	valid   []string
+	invalid map[string]string
+}
 
-	if runtime.GOOS == "windows" {
-		valid = []string{
+func TestConvertTmpfsOptions(t *testing.T) {
+	type testCase struct {
+		opt                  mount.TmpfsOptions
+		readOnly             bool
+		expectedSubstrings   []string
+		unexpectedSubstrings []string
+	}
+	cases := []testCase{
+		{
+			opt:                  mount.TmpfsOptions{SizeBytes: 1024 * 1024, Mode: 0700},
+			readOnly:             false,
+			expectedSubstrings:   []string{"size=1m", "mode=700"},
+			unexpectedSubstrings: []string{"ro"},
+		},
+		{
+			opt:                  mount.TmpfsOptions{},
+			readOnly:             true,
+			expectedSubstrings:   []string{"ro"},
+			unexpectedSubstrings: []string{},
+		},
+	}
+	p := &linuxParser{}
+	for _, c := range cases {
+		data, err := p.ConvertTmpfsOptions(&c.opt, c.readOnly)
+		if err != nil {
+			t.Fatalf("could not convert %+v (readOnly: %v) to string: %v",
+				c.opt, c.readOnly, err)
+		}
+		t.Logf("data=%q", data)
+		for _, s := range c.expectedSubstrings {
+			if !strings.Contains(data, s) {
+				t.Fatalf("expected substring: %s, got %v (case=%+v)", s, data, c)
+			}
+		}
+		for _, s := range c.unexpectedSubstrings {
+			if strings.Contains(data, s) {
+				t.Fatalf("unexpected substring: %s, got %v (case=%+v)", s, data, c)
+			}
+		}
+	}
+}
+
+type mockFiProvider struct{}
+
+func (mockFiProvider) fileInfo(path string) (exists, isDir bool, err error) {
+	dirs := map[string]struct{}{
+		`c:\`:                    {},
+		`c:\windows\`:            {},
+		`c:\windows`:             {},
+		`c:\program files`:       {},
+		`c:\Windows`:             {},
+		`c:\Program Files (x86)`: {},
+		`\\?\c:\windows\`:        {},
+	}
+	files := map[string]struct{}{
+		`c:\windows\system32\ntdll.dll`: {},
+	}
+	if _, ok := dirs[path]; ok {
+		return true, true, nil
+	}
+	if _, ok := files[path]; ok {
+		return true, false, nil
+	}
+	return false, false, nil
+}
+
+func TestParseMountRaw(t *testing.T) {
+
+	previousProvider := currentFileInfoProvider
+	defer func() { currentFileInfoProvider = previousProvider }()
+	currentFileInfoProvider = mockFiProvider{}
+	windowsSet := parseMountRawTestSet{
+		valid: []string{
 			`d:\`,
 			`d:`,
 			`d:\path`,
@@ -35,10 +105,14 @@
 			`name:D::RO`,
 			`c:/:d:/forward/slashes/are/good/too`,
 			`c:/:d:/including with/spaces:ro`,
-			`c:\Windows`,             // With capital
-			`c:\Program Files (x86)`, // With capitals and brackets
-		}
-		invalid = map[string]string{
+			`c:\Windows`,                // With capital
+			`c:\Program Files (x86)`,    // With capitals and brackets
+			`\\?\c:\windows\:d:`,        // Long path handling (source)
+			`c:\windows\:\\?\d:\`,       // Long path handling (target)
+			`\\.\pipe\foo:\\.\pipe\foo`, // named pipe
+			`//./pipe/foo://./pipe/foo`, // named pipe forward slashes
+		},
+		invalid: map[string]string{
 			``:                                 "invalid volume specification: ",
 			`.`:                                "invalid volume specification: ",
 			`..\`:                              "invalid volume specification: ",
@@ -82,10 +156,79 @@
 			`lpt8:d:`:                          `cannot be a reserved word for Windows filenames`,
 			`lpt9:d:`:                          `cannot be a reserved word for Windows filenames`,
 			`c:\windows\system32\ntdll.dll`:    `Only directories can be mapped on this platform`,
-		}
-
-	} else {
-		valid = []string{
+			`\\.\pipe\foo:c:\pipe`:             `'c:\pipe' is not a valid pipe path`,
+		},
+	}
+	lcowSet := parseMountRawTestSet{
+		valid: []string{
+			`/foo`,
+			`/foo/`,
+			`/foo bar`,
+			`c:\:/foo`,
+			`c:\windows\:/foo`,
+			`c:\windows:/s p a c e`,
+			`c:\windows:/s p a c e:RW`,
+			`c:\program files:/s p a c e i n h o s t d i r`,
+			`0123456789name:/foo`,
+			`MiXeDcAsEnAmE:/foo`,
+			`name:/foo`,
+			`name:/foo:rW`,
+			`name:/foo:RW`,
+			`name:/foo:RO`,
+			`c:/:/forward/slashes/are/good/too`,
+			`c:/:/including with/spaces:ro`,
+			`/Program Files (x86)`, // With capitals and brackets
+		},
+		invalid: map[string]string{
+			``:                                   "invalid volume specification: ",
+			`.`:                                  "invalid volume specification: ",
+			`c:`:                                 "invalid volume specification: ",
+			`c:\`:                                "invalid volume specification: ",
+			`../`:                                "invalid volume specification: ",
+			`c:\:../`:                            "invalid volume specification: ",
+			`c:\:/foo:xyzzy`:                     "invalid volume specification: ",
+			`/`:                                  "destination can't be '/'",
+			`/..`:                                "destination can't be '/'",
+			`c:\notexist:/foo`:                   `source path does not exist`,
+			`c:\windows\system32\ntdll.dll:/foo`: `source path must be a directory`,
+			`name<:/foo`:                         `invalid volume specification`,
+			`name>:/foo`:                         `invalid volume specification`,
+			`name::/foo`:                         `invalid volume specification`,
+			`name":/foo`:                         `invalid volume specification`,
+			`name\:/foo`:                         `invalid volume specification`,
+			`name*:/foo`:                         `invalid volume specification`,
+			`name|:/foo`:                         `invalid volume specification`,
+			`name?:/foo`:                         `invalid volume specification`,
+			`name/:/foo`:                         `invalid volume specification`,
+			`/foo:rw`:                            `invalid volume specification`,
+			`/foo:ro`:                            `invalid volume specification`,
+			`con:/foo`:                           `cannot be a reserved word for Windows filenames`,
+			`PRN:/foo`:                           `cannot be a reserved word for Windows filenames`,
+			`aUx:/foo`:                           `cannot be a reserved word for Windows filenames`,
+			`nul:/foo`:                           `cannot be a reserved word for Windows filenames`,
+			`com1:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`com2:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`com3:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`com4:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`com5:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`com6:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`com7:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`com8:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`com9:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`lpt1:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`lpt2:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`lpt3:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`lpt4:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`lpt5:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`lpt6:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`lpt7:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`lpt8:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`lpt9:/foo`:                          `cannot be a reserved word for Windows filenames`,
+			`\\.\pipe\foo:/foo`:                  `Linux containers on Windows do not support named pipe mounts`,
+		},
+	}
+	linuxSet := parseMountRawTestSet{
+		valid: []string{
 			"/home",
 			"/home:/home",
 			"/home:/something/else",
@@ -95,47 +238,87 @@
 			"hostPath:/containerPath:ro",
 			"/hostPath:/containerPath:rw",
 			"/rw:/ro",
-		}
-		invalid = map[string]string{
-			"":                "invalid volume specification",
-			"./":              "mount path must be absolute",
-			"../":             "mount path must be absolute",
-			"/:../":           "mount path must be absolute",
-			"/:path":          "mount path must be absolute",
-			":":               "invalid volume specification",
-			"/tmp:":           "invalid volume specification",
-			":test":           "invalid volume specification",
-			":/test":          "invalid volume specification",
-			"tmp:":            "invalid volume specification",
-			":test:":          "invalid volume specification",
-			"::":              "invalid volume specification",
-			":::":             "invalid volume specification",
-			"/tmp:::":         "invalid volume specification",
-			":/tmp::":         "invalid volume specification",
-			"/path:rw":        "invalid volume specification",
-			"/path:ro":        "invalid volume specification",
-			"/rw:rw":          "invalid volume specification",
-			"path:ro":         "invalid volume specification",
-			"/path:/path:sw":  `invalid mode`,
-			"/path:/path:rwz": `invalid mode`,
-		}
+			"/hostPath:/containerPath:shared",
+			"/hostPath:/containerPath:rshared",
+			"/hostPath:/containerPath:slave",
+			"/hostPath:/containerPath:rslave",
+			"/hostPath:/containerPath:private",
+			"/hostPath:/containerPath:rprivate",
+			"/hostPath:/containerPath:ro,shared",
+			"/hostPath:/containerPath:ro,slave",
+			"/hostPath:/containerPath:ro,private",
+			"/hostPath:/containerPath:ro,z,shared",
+			"/hostPath:/containerPath:ro,Z,slave",
+			"/hostPath:/containerPath:Z,ro,slave",
+			"/hostPath:/containerPath:slave,Z,ro",
+			"/hostPath:/containerPath:Z,slave,ro",
+			"/hostPath:/containerPath:slave,ro,Z",
+			"/hostPath:/containerPath:rslave,ro,Z",
+			"/hostPath:/containerPath:ro,rshared,Z",
+			"/hostPath:/containerPath:ro,Z,rprivate",
+		},
+		invalid: map[string]string{
+			"":                                "invalid volume specification",
+			"./":                              "mount path must be absolute",
+			"../":                             "mount path must be absolute",
+			"/:../":                           "mount path must be absolute",
+			"/:path":                          "mount path must be absolute",
+			":":                               "invalid volume specification",
+			"/tmp:":                           "invalid volume specification",
+			":test":                           "invalid volume specification",
+			":/test":                          "invalid volume specification",
+			"tmp:":                            "invalid volume specification",
+			":test:":                          "invalid volume specification",
+			"::":                              "invalid volume specification",
+			":::":                             "invalid volume specification",
+			"/tmp:::":                         "invalid volume specification",
+			":/tmp::":                         "invalid volume specification",
+			"/path:rw":                        "invalid volume specification",
+			"/path:ro":                        "invalid volume specification",
+			"/rw:rw":                          "invalid volume specification",
+			"path:ro":                         "invalid volume specification",
+			"/path:/path:sw":                  `invalid mode`,
+			"/path:/path:rwz":                 `invalid mode`,
+			"/path:/path:ro,rshared,rslave":   `invalid mode`,
+			"/path:/path:ro,z,rshared,rslave": `invalid mode`,
+			"/path:shared":                    "invalid volume specification",
+			"/path:slave":                     "invalid volume specification",
+			"/path:private":                   "invalid volume specification",
+			"name:/absolute-path:shared":      "invalid volume specification",
+			"name:/absolute-path:rshared":     "invalid volume specification",
+			"name:/absolute-path:slave":       "invalid volume specification",
+			"name:/absolute-path:rslave":      "invalid volume specification",
+			"name:/absolute-path:private":     "invalid volume specification",
+			"name:/absolute-path:rprivate":    "invalid volume specification",
+		},
 	}
 
-	for _, path := range valid {
-		if _, err := ParseMountRaw(path, "local"); err != nil {
-			t.Fatalf("ParseMountRaw(`%q`) should succeed: error %q", path, err)
-		}
-	}
+	linParser := &linuxParser{}
+	winParser := &windowsParser{}
+	lcowParser := &lcowParser{}
+	tester := func(parser Parser, set parseMountRawTestSet) {
 
-	for path, expectedError := range invalid {
-		if mp, err := ParseMountRaw(path, "local"); err == nil {
-			t.Fatalf("ParseMountRaw(`%q`) should have failed validation. Err '%v' - MP: %v", path, err, mp)
-		} else {
-			if !strings.Contains(err.Error(), expectedError) {
-				t.Fatalf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error())
+		for _, path := range set.valid {
+
+			if _, err := parser.ParseMountRaw(path, "local"); err != nil {
+				t.Errorf("ParseMountRaw(`%q`) should succeed: error %q", path, err)
+			}
+		}
+
+		for path, expectedError := range set.invalid {
+			if mp, err := parser.ParseMountRaw(path, "local"); err == nil {
+				t.Errorf("ParseMountRaw(`%q`) should have failed validation. Err '%v' - MP: %v", path, err, mp)
+			} else {
+				if !strings.Contains(err.Error(), expectedError) {
+					t.Errorf("ParseMountRaw(`%q`) error should contain %q, got %v", path, expectedError, err.Error())
+				}
 			}
 		}
 	}
+	tester(linParser, linuxSet)
+	tester(winParser, windowsSet)
+	tester(lcowParser, lcowSet)
+
 }
 
 // testParseMountRaw is a structure used by TestParseMountRawSplit for
@@ -153,76 +336,96 @@
 }
 
 func TestParseMountRawSplit(t *testing.T) {
-	var cases []testParseMountRaw
-	if runtime.GOOS == "windows" {
-		cases = []testParseMountRaw{
-			{`c:\:d:`, "local", mount.TypeBind, `d:`, `c:\`, ``, "", true, false},
-			{`c:\:d:\`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", true, false},
-			{`c:\:d:\:ro`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", false, false},
-			{`c:\:d:\:rw`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", true, false},
-			{`c:\:d:\:foo`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", false, true},
-			{`\\.\pipe\foo:\\.\pipe\bar`, "local", mount.TypeNamedPipe, `\\.\pipe\bar`, `\\.\pipe\foo`, "", "", true, false},
-			{`\\.\pipe\foo:c:\foo\bar`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true},
-			{`c:\foo\bar:\\.\pipe\foo`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true},
-			{`name:d::rw`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", true, false},
-			{`name:d:`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", true, false},
-			{`name:d::ro`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", false, false},
-			{`name:c:`, "", mount.TypeVolume, ``, ``, ``, "", true, true},
-			{`driver/name:c:`, "", mount.TypeVolume, ``, ``, ``, "", true, true},
-		}
-	} else {
-		cases = []testParseMountRaw{
-			{"/tmp:/tmp1", "", mount.TypeBind, "/tmp1", "/tmp", "", "", true, false},
-			{"/tmp:/tmp2:ro", "", mount.TypeBind, "/tmp2", "/tmp", "", "", false, false},
-			{"/tmp:/tmp3:rw", "", mount.TypeBind, "/tmp3", "/tmp", "", "", true, false},
-			{"/tmp:/tmp4:foo", "", mount.TypeBind, "", "", "", "", false, true},
-			{"name:/named1", "", mount.TypeVolume, "/named1", "", "name", "", true, false},
-			{"name:/named2", "external", mount.TypeVolume, "/named2", "", "name", "external", true, false},
-			{"name:/named3:ro", "local", mount.TypeVolume, "/named3", "", "name", "local", false, false},
-			{"local/name:/tmp:rw", "", mount.TypeVolume, "/tmp", "", "local/name", "", true, false},
-			{"/tmp:tmp", "", mount.TypeBind, "", "", "", "", true, true},
-		}
+	previousProvider := currentFileInfoProvider
+	defer func() { currentFileInfoProvider = previousProvider }()
+	currentFileInfoProvider = mockFiProvider{}
+	windowsCases := []testParseMountRaw{
+		{`c:\:d:`, "local", mount.TypeBind, `d:`, `c:\`, ``, "", true, false},
+		{`c:\:d:\`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", true, false},
+		{`c:\:d:\:ro`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", false, false},
+		{`c:\:d:\:rw`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", true, false},
+		{`c:\:d:\:foo`, "local", mount.TypeBind, `d:\`, `c:\`, ``, "", false, true},
+		{`name:d::rw`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", true, false},
+		{`name:d:`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", true, false},
+		{`name:d::ro`, "local", mount.TypeVolume, `d:`, ``, `name`, "local", false, false},
+		{`name:c:`, "", mount.TypeVolume, ``, ``, ``, "", true, true},
+		{`driver/name:c:`, "", mount.TypeVolume, ``, ``, ``, "", true, true},
+		{`\\.\pipe\foo:\\.\pipe\bar`, "local", mount.TypeNamedPipe, `\\.\pipe\bar`, `\\.\pipe\foo`, "", "", true, false},
+		{`\\.\pipe\foo:c:\foo\bar`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true},
+		{`c:\foo\bar:\\.\pipe\foo`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true},
 	}
-
-	for i, c := range cases {
-		t.Logf("case %d", i)
-		m, err := ParseMountRaw(c.bind, c.driver)
-		if c.fail {
-			if err == nil {
-				t.Fatalf("Expected error, was nil, for spec %s\n", c.bind)
+	lcowCases := []testParseMountRaw{
+		{`c:\:/foo`, "local", mount.TypeBind, `/foo`, `c:\`, ``, "", true, false},
+		{`c:\:/foo:ro`, "local", mount.TypeBind, `/foo`, `c:\`, ``, "", false, false},
+		{`c:\:/foo:rw`, "local", mount.TypeBind, `/foo`, `c:\`, ``, "", true, false},
+		{`c:\:/foo:foo`, "local", mount.TypeBind, `/foo`, `c:\`, ``, "", false, true},
+		{`name:/foo:rw`, "local", mount.TypeVolume, `/foo`, ``, `name`, "local", true, false},
+		{`name:/foo`, "local", mount.TypeVolume, `/foo`, ``, `name`, "local", true, false},
+		{`name:/foo:ro`, "local", mount.TypeVolume, `/foo`, ``, `name`, "local", false, false},
+		{`name:/`, "", mount.TypeVolume, ``, ``, ``, "", true, true},
+		{`driver/name:/`, "", mount.TypeVolume, ``, ``, ``, "", true, true},
+		{`\\.\pipe\foo:\\.\pipe\bar`, "local", mount.TypeNamedPipe, `\\.\pipe\bar`, `\\.\pipe\foo`, "", "", true, true},
+		{`\\.\pipe\foo:/data`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true},
+		{`c:\foo\bar:\\.\pipe\foo`, "local", mount.TypeNamedPipe, ``, ``, "", "", true, true},
+	}
+	linuxCases := []testParseMountRaw{
+		{"/tmp:/tmp1", "", mount.TypeBind, "/tmp1", "/tmp", "", "", true, false},
+		{"/tmp:/tmp2:ro", "", mount.TypeBind, "/tmp2", "/tmp", "", "", false, false},
+		{"/tmp:/tmp3:rw", "", mount.TypeBind, "/tmp3", "/tmp", "", "", true, false},
+		{"/tmp:/tmp4:foo", "", mount.TypeBind, "", "", "", "", false, true},
+		{"name:/named1", "", mount.TypeVolume, "/named1", "", "name", "", true, false},
+		{"name:/named2", "external", mount.TypeVolume, "/named2", "", "name", "external", true, false},
+		{"name:/named3:ro", "local", mount.TypeVolume, "/named3", "", "name", "local", false, false},
+		{"local/name:/tmp:rw", "", mount.TypeVolume, "/tmp", "", "local/name", "", true, false},
+		{"/tmp:tmp", "", mount.TypeBind, "", "", "", "", true, true},
+	}
+	linParser := &linuxParser{}
+	winParser := &windowsParser{}
+	lcowParser := &lcowParser{}
+	tester := func(parser Parser, cases []testParseMountRaw) {
+		for i, c := range cases {
+			t.Logf("case %d", i)
+			m, err := parser.ParseMountRaw(c.bind, c.driver)
+			if c.fail {
+				if err == nil {
+					t.Errorf("Expected error, was nil, for spec %s\n", c.bind)
+				}
+				continue
 			}
-			continue
-		}
 
-		if m == nil || err != nil {
-			t.Fatalf("ParseMountRaw failed for spec '%s', driver '%s', error '%v'", c.bind, c.driver, err.Error())
-			continue
-		}
+			if m == nil || err != nil {
+				t.Errorf("ParseMountRaw failed for spec '%s', driver '%s', error '%v'", c.bind, c.driver, err.Error())
+				continue
+			}
 
-		if m.Type != c.expType {
-			t.Fatalf("Expected type '%s', was '%s', for spec '%s'", c.expType, m.Type, c.bind)
-		}
+			if m.Destination != c.expDest {
+				t.Errorf("Expected destination '%s, was %s', for spec '%s'", c.expDest, m.Destination, c.bind)
+			}
 
-		if m.Destination != c.expDest {
-			t.Fatalf("Expected destination '%s', was '%s', for spec '%s'", c.expDest, m.Destination, c.bind)
-		}
+			if m.Source != c.expSource {
+				t.Errorf("Expected source '%s', was '%s', for spec '%s'", c.expSource, m.Source, c.bind)
+			}
 
-		if m.Source != c.expSource {
-			t.Fatalf("Expected source '%s', was '%s', for spec '%s'", c.expSource, m.Source, c.bind)
-		}
+			if m.Name != c.expName {
+				t.Errorf("Expected name '%s', was '%s' for spec '%s'", c.expName, m.Name, c.bind)
+			}
 
-		if m.Name != c.expName {
-			t.Fatalf("Expected name '%s', was '%s' for spec '%s'", c.expName, m.Name, c.bind)
-		}
+			if m.Driver != c.expDriver {
+				t.Errorf("Expected driver '%s', was '%s', for spec '%s'", c.expDriver, m.Driver, c.bind)
+			}
 
-		if m.Driver != c.expDriver {
-			t.Fatalf("Expected driver '%s', was '%s', for spec '%s'", c.expDriver, m.Driver, c.bind)
-		}
-
-		if m.RW != c.expRW {
-			t.Fatalf("Expected RW '%v', was '%v' for spec '%s'", c.expRW, m.RW, c.bind)
+			if m.RW != c.expRW {
+				t.Errorf("Expected RW '%v', was '%v' for spec '%s'", c.expRW, m.RW, c.bind)
+			}
+			if m.Type != c.expType {
+				t.Fatalf("Expected type '%s', was '%s', for spec '%s'", c.expType, m.Type, c.bind)
+			}
 		}
 	}
+
+	tester(linParser, linuxCases)
+	tester(winParser, windowsCases)
+	tester(lcowParser, lcowCases)
 }
 
 func TestParseMountSpec(t *testing.T) {
@@ -235,43 +438,43 @@
 		t.Fatal(err)
 	}
 	defer os.RemoveAll(testDir)
-
+	parser := NewParser(runtime.GOOS)
 	cases := []c{
-		{mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}},
-		{mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, RW: true, Propagation: DefaultPropagationMode}},
-		{mount.Mount{Type: mount.TypeBind, Source: testDir + string(os.PathSeparator), Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}},
-		{mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath + string(os.PathSeparator), ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: DefaultPropagationMode}},
-		{mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}},
-		{mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath + string(os.PathSeparator)}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: DefaultCopyMode}},
+		{mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: parser.DefaultPropagationMode()}},
+		{mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, RW: true, Propagation: parser.DefaultPropagationMode()}},
+		{mount.Mount{Type: mount.TypeBind, Source: testDir + string(os.PathSeparator), Target: testDestinationPath, ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: parser.DefaultPropagationMode()}},
+		{mount.Mount{Type: mount.TypeBind, Source: testDir, Target: testDestinationPath + string(os.PathSeparator), ReadOnly: true}, MountPoint{Type: mount.TypeBind, Source: testDir, Destination: testDestinationPath, Propagation: parser.DefaultPropagationMode()}},
+		{mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: parser.DefaultCopyMode()}},
+		{mount.Mount{Type: mount.TypeVolume, Target: testDestinationPath + string(os.PathSeparator)}, MountPoint{Type: mount.TypeVolume, Destination: testDestinationPath, RW: true, CopyData: parser.DefaultCopyMode()}},
 	}
 
 	for i, c := range cases {
 		t.Logf("case %d", i)
-		mp, err := ParseMountSpec(c.input)
+		mp, err := parser.ParseMountSpec(c.input)
 		if err != nil {
-			t.Fatal(err)
+			t.Error(err)
 		}
 
 		if c.expected.Type != mp.Type {
-			t.Fatalf("Expected mount types to match. Expected: '%s', Actual: '%s'", c.expected.Type, mp.Type)
+			t.Errorf("Expected mount types to match. Expected: '%s', Actual: '%s'", c.expected.Type, mp.Type)
 		}
 		if c.expected.Destination != mp.Destination {
-			t.Fatalf("Expected mount destination to match. Expected: '%s', Actual: '%s'", c.expected.Destination, mp.Destination)
+			t.Errorf("Expected mount destination to match. Expected: '%s', Actual: '%s'", c.expected.Destination, mp.Destination)
 		}
 		if c.expected.Source != mp.Source {
-			t.Fatalf("Expected mount source to match. Expected: '%s', Actual: '%s'", c.expected.Source, mp.Source)
+			t.Errorf("Expected mount source to match. Expected: '%s', Actual: '%s'", c.expected.Source, mp.Source)
 		}
 		if c.expected.RW != mp.RW {
-			t.Fatalf("Expected mount writable to match. Expected: '%v', Actual: '%v'", c.expected.RW, mp.RW)
+			t.Errorf("Expected mount writable to match. Expected: '%v', Actual: '%v'", c.expected.RW, mp.RW)
 		}
 		if c.expected.Propagation != mp.Propagation {
-			t.Fatalf("Expected mount propagation to match. Expected: '%v', Actual: '%s'", c.expected.Propagation, mp.Propagation)
+			t.Errorf("Expected mount propagation to match. Expected: '%v', Actual: '%s'", c.expected.Propagation, mp.Propagation)
 		}
 		if c.expected.Driver != mp.Driver {
-			t.Fatalf("Expected mount driver to match. Expected: '%v', Actual: '%s'", c.expected.Driver, mp.Driver)
+			t.Errorf("Expected mount driver to match. Expected: '%v', Actual: '%s'", c.expected.Driver, mp.Driver)
 		}
 		if c.expected.CopyData != mp.CopyData {
-			t.Fatalf("Expected mount copy data to match. Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData)
+			t.Errorf("Expected mount copy data to match. Expected: '%v', Actual: '%v'", c.expected.CopyData, mp.CopyData)
 		}
 	}
 }
diff --git a/volume/volume_unix.go b/volume/volume_unix.go
index 31555a6..1cb9317 100644
--- a/volume/volume_unix.go
+++ b/volume/volume_unix.go
@@ -1,156 +1,18 @@
-// +build linux freebsd darwin solaris
+// +build linux freebsd darwin
 
 package volume
 
 import (
 	"fmt"
-	"os"
 	"path/filepath"
 	"strings"
-
-	mounttypes "github.com/docker/docker/api/types/mount"
 )
 
-var platformRawValidationOpts = []func(o *validateOpts){
-	// need to make sure to not error out if the bind source does not exist on unix
-	// this is supported for historical reasons, the path will be automatically
-	// created later.
-	func(o *validateOpts) { o.skipBindSourceCheck = true },
-}
-
-// read-write modes
-var rwModes = map[string]bool{
-	"rw": true,
-	"ro": true,
-}
-
-// label modes
-var labelModes = map[string]bool{
-	"Z": true,
-	"z": true,
-}
-
-// consistency modes
-var consistencyModes = map[mounttypes.Consistency]bool{
-	mounttypes.ConsistencyFull:      true,
-	mounttypes.ConsistencyCached:    true,
-	mounttypes.ConsistencyDelegated: true,
-}
-
-// BackwardsCompatible decides whether this mount point can be
-// used in old versions of Docker or not.
-// Only bind mounts and local volumes can be used in old versions of Docker.
-func (m *MountPoint) BackwardsCompatible() bool {
-	return len(m.Source) > 0 || m.Driver == DefaultDriverName
-}
-
-// HasResource checks whether the given absolute path for a container is in
-// this mount point. If the relative path starts with `../` then the resource
-// is outside of this mount point, but we can't simply check for this prefix
-// because it misses `..` which is also outside of the mount, so check both.
-func (m *MountPoint) HasResource(absolutePath string) bool {
+func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool {
 	relPath, err := filepath.Rel(m.Destination, absolutePath)
 	return err == nil && relPath != ".." && !strings.HasPrefix(relPath, fmt.Sprintf("..%c", filepath.Separator))
 }
 
-// IsVolumeNameValid checks a volume name in a platform specific manner.
-func IsVolumeNameValid(name string) (bool, error) {
-	return true, nil
-}
-
-// ValidMountMode will make sure the mount mode is valid.
-// returns if it's a valid mount mode or not.
-func ValidMountMode(mode string) bool {
-	if mode == "" {
-		return true
-	}
-
-	rwModeCount := 0
-	labelModeCount := 0
-	propagationModeCount := 0
-	copyModeCount := 0
-	consistencyModeCount := 0
-
-	for _, o := range strings.Split(mode, ",") {
-		switch {
-		case rwModes[o]:
-			rwModeCount++
-		case labelModes[o]:
-			labelModeCount++
-		case propagationModes[mounttypes.Propagation(o)]:
-			propagationModeCount++
-		case copyModeExists(o):
-			copyModeCount++
-		case consistencyModes[mounttypes.Consistency(o)]:
-			consistencyModeCount++
-		default:
-			return false
-		}
-	}
-
-	// Only one string for each mode is allowed.
-	if rwModeCount > 1 || labelModeCount > 1 || propagationModeCount > 1 || copyModeCount > 1 || consistencyModeCount > 1 {
-		return false
-	}
-	return true
-}
-
-// ReadWrite tells you if a mode string is a valid read-write mode or not.
-// If there are no specifications w.r.t read write mode, then by default
-// it returns true.
-func ReadWrite(mode string) bool {
-	if !ValidMountMode(mode) {
-		return false
-	}
-
-	for _, o := range strings.Split(mode, ",") {
-		if o == "ro" {
-			return false
-		}
-	}
-	return true
-}
-
-func validateNotRoot(p string) error {
-	p = filepath.Clean(convertSlash(p))
-	if p == "/" {
-		return fmt.Errorf("invalid specification: destination can't be '/'")
-	}
-	return nil
-}
-
-func convertSlash(p string) string {
-	return p
-}
-
-// isAbsPath reports whether the path is absolute.
-func isAbsPath(p string) bool {
-	return filepath.IsAbs(p)
-}
-
-func splitRawSpec(raw string) ([]string, error) {
-	if strings.Count(raw, ":") > 2 {
-		return nil, errInvalidSpec(raw)
-	}
-
-	arr := strings.SplitN(raw, ":", 3)
-	if arr[0] == "" {
-		return nil, errInvalidSpec(raw)
-	}
-	return arr, nil
-}
-
-func detectMountType(p string) mounttypes.Type {
-	if filepath.IsAbs(p) {
-		return mounttypes.TypeBind
-	}
-	return mounttypes.TypeVolume
-}
-
-func clean(p string) string {
-	return filepath.Clean(p)
-}
-
-func validateStat(fi os.FileInfo) error {
-	return nil
+func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool {
+	return false
 }
diff --git a/volume/volume_unsupported.go b/volume/volume_unsupported.go
deleted file mode 100644
index ff9d6af..0000000
--- a/volume/volume_unsupported.go
+++ /dev/null
@@ -1,16 +0,0 @@
-// +build !linux
-
-package volume
-
-import (
-	"fmt"
-	"runtime"
-
-	mounttypes "github.com/docker/docker/api/types/mount"
-)
-
-// ConvertTmpfsOptions converts *mounttypes.TmpfsOptions to the raw option string
-// for mount(2).
-func ConvertTmpfsOptions(opt *mounttypes.TmpfsOptions, readOnly bool) (string, error) {
-	return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS)
-}
diff --git a/volume/volume_windows.go b/volume/volume_windows.go
index 5bee223..8ec1d6c 100644
--- a/volume/volume_windows.go
+++ b/volume/volume_windows.go
@@ -1,213 +1,8 @@
 package volume
 
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"regexp"
-	"strings"
-
-	mounttypes "github.com/docker/docker/api/types/mount"
-)
-
-// read-write modes
-var rwModes = map[string]bool{
-	"rw": true,
-}
-
-// read-only modes
-var roModes = map[string]bool{
-	"ro": true,
-}
-
-var platformRawValidationOpts = []func(*validateOpts){}
-
-const (
-	// Spec should be in the format [source:]destination[:mode]
-	//
-	// Examples: c:\foo bar:d:rw
-	//           c:\foo:d:\bar
-	//           myname:d:
-	//           d:\
-	//
-	// Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See
-	// https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to
-	// test is https://regex-golang.appspot.com/assets/html/index.html
-	//
-	// Useful link for referencing named capturing groups:
-	// http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex
-	//
-	// There are three match groups: source, destination and mode.
-	//
-
-	// RXHostDir is the first option of a source
-	RXHostDir = `[a-z]:\\(?:[^\\/:*?"<>|\r\n]+\\?)*`
-	// RXName is the second option of a source
-	RXName = `[^\\/:*?"<>|\r\n]+`
-	// RXPipe is a named path pipe (starts with `\\.\pipe\`, possibly with / instead of \)
-	RXPipe = `[/\\]{2}.[/\\]pipe[/\\][^:*?"<>|\r\n]+`
-	// RXReservedNames are reserved names not possible on Windows
-	RXReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])`
-
-	// RXSource is the combined possibilities for a source
-	RXSource = `((?P<source>((` + RXHostDir + `)|(` + RXName + `)|(` + RXPipe + `))):)?`
-
-	// Source. Can be either a host directory, a name, or omitted:
-	//  HostDir:
-	//    -  Essentially using the folder solution from
-	//       https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html
-	//       but adding case insensitivity.
-	//    -  Must be an absolute path such as c:\path
-	//    -  Can include spaces such as `c:\program files`
-	//    -  And then followed by a colon which is not in the capture group
-	//    -  And can be optional
-	//  Name:
-	//    -  Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
-	//    -  And then followed by a colon which is not in the capture group
-	//    -  And can be optional
-
-	// RXDestinationDir is the file path option for the mount destination
-	RXDestinationDir = `([a-z]):((?:\\[^\\/:*?"<>\r\n]+)*\\?)`
-	// RXDestination is the regex expression for the mount destination
-	RXDestination = `(?P<destination>(` + RXDestinationDir + `)|(` + RXPipe + `))`
-	// Destination (aka container path):
-	//    -  Variation on hostdir but can be a drive followed by colon as well
-	//    -  If a path, must be absolute. Can include spaces
-	//    -  Drive cannot be c: (explicitly checked in code, not RegEx)
-
-	// RXMode is the regex expression for the mode of the mount
-	// Mode (optional):
-	//    -  Hopefully self explanatory in comparison to above regex's.
-	//    -  Colon is not in the capture group
-	RXMode = `(:(?P<mode>(?i)ro|rw))?`
-)
-
-// BackwardsCompatible decides whether this mount point can be
-// used in old versions of Docker or not.
-// Windows volumes are never backwards compatible.
-func (m *MountPoint) BackwardsCompatible() bool {
+func (p *windowsParser) HasResource(m *MountPoint, absolutePath string) bool {
 	return false
 }
-
-func splitRawSpec(raw string) ([]string, error) {
-	specExp := regexp.MustCompile(`^` + RXSource + RXDestination + RXMode + `$`)
-	match := specExp.FindStringSubmatch(strings.ToLower(raw))
-
-	// Must have something back
-	if len(match) == 0 {
-		return nil, errInvalidSpec(raw)
-	}
-
-	var split []string
-	matchgroups := make(map[string]string)
-	// Pull out the sub expressions from the named capture groups
-	for i, name := range specExp.SubexpNames() {
-		matchgroups[name] = strings.ToLower(match[i])
-	}
-	if source, exists := matchgroups["source"]; exists {
-		if source != "" {
-			split = append(split, source)
-		}
-	}
-	if destination, exists := matchgroups["destination"]; exists {
-		if destination != "" {
-			split = append(split, destination)
-		}
-	}
-	if mode, exists := matchgroups["mode"]; exists {
-		if mode != "" {
-			split = append(split, mode)
-		}
-	}
-	// Fix #26329. If the destination appears to be a file, and the source is null,
-	// it may be because we've fallen through the possible naming regex and hit a
-	// situation where the user intention was to map a file into a container through
-	// a local volume, but this is not supported by the platform.
-	if matchgroups["source"] == "" && matchgroups["destination"] != "" {
-		validName, err := IsVolumeNameValid(matchgroups["destination"])
-		if err != nil {
-			return nil, err
-		}
-		if !validName {
-			if fi, err := os.Stat(matchgroups["destination"]); err == nil {
-				if !fi.IsDir() {
-					return nil, fmt.Errorf("file '%s' cannot be mapped. Only directories can be mapped on this platform", matchgroups["destination"])
-				}
-			}
-		}
-	}
-	return split, nil
-}
-
-func detectMountType(p string) mounttypes.Type {
-	if strings.HasPrefix(filepath.FromSlash(p), `\\.\pipe\`) {
-		return mounttypes.TypeNamedPipe
-	} else if filepath.IsAbs(p) {
-		return mounttypes.TypeBind
-	}
-	return mounttypes.TypeVolume
-}
-
-// IsVolumeNameValid checks a volume name in a platform specific manner.
-func IsVolumeNameValid(name string) (bool, error) {
-	nameExp := regexp.MustCompile(`^` + RXName + `$`)
-	if !nameExp.MatchString(name) {
-		return false, nil
-	}
-	nameExp = regexp.MustCompile(`^` + RXReservedNames + `$`)
-	if nameExp.MatchString(name) {
-		return false, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name)
-	}
-	return true, nil
-}
-
-// ValidMountMode will make sure the mount mode is valid.
-// returns if it's a valid mount mode or not.
-func ValidMountMode(mode string) bool {
-	if mode == "" {
-		return true
-	}
-	return roModes[strings.ToLower(mode)] || rwModes[strings.ToLower(mode)]
-}
-
-// ReadWrite tells you if a mode string is a valid read-write mode or not.
-func ReadWrite(mode string) bool {
-	return rwModes[strings.ToLower(mode)] || mode == ""
-}
-
-func validateNotRoot(p string) error {
-	p = strings.ToLower(convertSlash(p))
-	if p == "c:" || p == `c:\` {
-		return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p)
-	}
-	return nil
-}
-
-func convertSlash(p string) string {
-	return filepath.FromSlash(p)
-}
-
-// isAbsPath returns whether a path is absolute for the purposes of mounting into a container
-// (absolute paths, drive letter paths such as X:, and paths starting with `\\.\` to support named pipes).
-func isAbsPath(p string) bool {
-	return filepath.IsAbs(p) ||
-		strings.HasPrefix(p, `\\.\`) ||
-		(len(p) == 2 && p[1] == ':' && ((p[0] >= 'a' && p[0] <= 'z') || (p[0] >= 'A' && p[0] <= 'Z')))
-}
-
-// Do not clean plain drive letters or paths starting with `\\.\`.
-var cleanRegexp = regexp.MustCompile(`^([a-z]:|[/\\]{2}\.[/\\].*)$`)
-
-func clean(p string) string {
-	if match := cleanRegexp.MatchString(p); match {
-		return p
-	}
-	return filepath.Clean(p)
-}
-
-func validateStat(fi os.FileInfo) error {
-	if !fi.IsDir() {
-		return fmt.Errorf("source path must be a directory")
-	}
-	return nil
+func (p *linuxParser) HasResource(m *MountPoint, absolutePath string) bool {
+	return false
 }
diff --git a/volume/windows_parser.go b/volume/windows_parser.go
new file mode 100644
index 0000000..172610d
--- /dev/null
+++ b/volume/windows_parser.go
@@ -0,0 +1,456 @@
+package volume
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"regexp"
+	"runtime"
+	"strings"
+
+	"github.com/docker/docker/api/types/mount"
+	"github.com/docker/docker/pkg/stringid"
+)
+
+type windowsParser struct {
+}
+
+const (
+	// Spec should be in the format [source:]destination[:mode]
+	//
+	// Examples: c:\foo bar:d:rw
+	//           c:\foo:d:\bar
+	//           myname:d:
+	//           d:\
+	//
+	// Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See
+	// https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to
+	// test is https://regex-golang.appspot.com/assets/html/index.html
+	//
+	// Useful link for referencing named capturing groups:
+	// http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex
+	//
+	// There are three match groups: source, destination and mode.
+	//
+
+	// rxHostDir is the first option of a source
+	rxHostDir = `(?:\\\\\?\\)?[a-z]:[\\/](?:[^\\/:*?"<>|\r\n]+[\\/]?)*`
+	// rxName is the second option of a source
+	rxName = `[^\\/:*?"<>|\r\n]+`
+
+	// RXReservedNames are reserved names not possible on Windows
+	rxReservedNames = `(con)|(prn)|(nul)|(aux)|(com[1-9])|(lpt[1-9])`
+
+	// rxPipe is a named path pipe (starts with `\\.\pipe\`, possibly with / instead of \)
+	rxPipe = `[/\\]{2}.[/\\]pipe[/\\][^:*?"<>|\r\n]+`
+	// rxSource is the combined possibilities for a source
+	rxSource = `((?P<source>((` + rxHostDir + `)|(` + rxName + `)|(` + rxPipe + `))):)?`
+
+	// Source. Can be either a host directory, a name, or omitted:
+	//  HostDir:
+	//    -  Essentially using the folder solution from
+	//       https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html
+	//       but adding case insensitivity.
+	//    -  Must be an absolute path such as c:\path
+	//    -  Can include spaces such as `c:\program files`
+	//    -  And then followed by a colon which is not in the capture group
+	//    -  And can be optional
+	//  Name:
+	//    -  Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)
+	//    -  And then followed by a colon which is not in the capture group
+	//    -  And can be optional
+
+	// rxDestination is the regex expression for the mount destination
+	rxDestination = `(?P<destination>((?:\\\\\?\\)?([a-z]):((?:[\\/][^\\/:*?"<>\r\n]+)*[\\/]?))|(` + rxPipe + `))`
+
+	rxLCOWDestination = `(?P<destination>/(?:[^\\/:*?"<>\r\n]+[/]?)*)`
+	// Destination (aka container path):
+	//    -  Variation on hostdir but can be a drive followed by colon as well
+	//    -  If a path, must be absolute. Can include spaces
+	//    -  Drive cannot be c: (explicitly checked in code, not RegEx)
+
+	// rxMode is the regex expression for the mode of the mount
+	// Mode (optional):
+	//    -  Hopefully self explanatory in comparison to above regex's.
+	//    -  Colon is not in the capture group
+	rxMode = `(:(?P<mode>(?i)ro|rw))?`
+)
+
+type mountValidator func(mnt *mount.Mount) error
+
+func windowsSplitRawSpec(raw, destRegex string) ([]string, error) {
+	specExp := regexp.MustCompile(`^` + rxSource + destRegex + rxMode + `$`)
+	match := specExp.FindStringSubmatch(strings.ToLower(raw))
+
+	// Must have something back
+	if len(match) == 0 {
+		return nil, errInvalidSpec(raw)
+	}
+
+	var split []string
+	matchgroups := make(map[string]string)
+	// Pull out the sub expressions from the named capture groups
+	for i, name := range specExp.SubexpNames() {
+		matchgroups[name] = strings.ToLower(match[i])
+	}
+	if source, exists := matchgroups["source"]; exists {
+		if source != "" {
+			split = append(split, source)
+		}
+	}
+	if destination, exists := matchgroups["destination"]; exists {
+		if destination != "" {
+			split = append(split, destination)
+		}
+	}
+	if mode, exists := matchgroups["mode"]; exists {
+		if mode != "" {
+			split = append(split, mode)
+		}
+	}
+	// Fix #26329. If the destination appears to be a file, and the source is null,
+	// it may be because we've fallen through the possible naming regex and hit a
+	// situation where the user intention was to map a file into a container through
+	// a local volume, but this is not supported by the platform.
+	if matchgroups["source"] == "" && matchgroups["destination"] != "" {
+		volExp := regexp.MustCompile(`^` + rxName + `$`)
+		reservedNameExp := regexp.MustCompile(`^` + rxReservedNames + `$`)
+
+		if volExp.MatchString(matchgroups["destination"]) {
+			if reservedNameExp.MatchString(matchgroups["destination"]) {
+				return nil, fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", matchgroups["destination"])
+			}
+		} else {
+
+			exists, isDir, _ := currentFileInfoProvider.fileInfo(matchgroups["destination"])
+			if exists && !isDir {
+				return nil, fmt.Errorf("file '%s' cannot be mapped. Only directories can be mapped on this platform", matchgroups["destination"])
+
+			}
+		}
+	}
+	return split, nil
+}
+
+func windowsValidMountMode(mode string) bool {
+	if mode == "" {
+		return true
+	}
+	return rwModes[strings.ToLower(mode)]
+}
+func windowsValidateNotRoot(p string) error {
+	p = strings.ToLower(strings.Replace(p, `/`, `\`, -1))
+	if p == "c:" || p == `c:\` {
+		return fmt.Errorf("destination path cannot be `c:` or `c:\\`: %v", p)
+	}
+	return nil
+}
+
+var windowsSpecificValidators mountValidator = func(mnt *mount.Mount) error {
+	return windowsValidateNotRoot(mnt.Target)
+}
+
+func windowsValidateRegex(p, r string) error {
+	if regexp.MustCompile(`^` + r + `$`).MatchString(strings.ToLower(p)) {
+		return nil
+	}
+	return fmt.Errorf("invalid mount path: '%s'", p)
+}
+func windowsValidateAbsolute(p string) error {
+	if err := windowsValidateRegex(p, rxDestination); err != nil {
+		return fmt.Errorf("invalid mount path: '%s' mount path must be absolute", p)
+	}
+	return nil
+}
+
+func windowsDetectMountType(p string) mount.Type {
+	if strings.HasPrefix(p, `\\.\pipe\`) {
+		return mount.TypeNamedPipe
+	} else if regexp.MustCompile(`^` + rxHostDir + `$`).MatchString(p) {
+		return mount.TypeBind
+	} else {
+		return mount.TypeVolume
+	}
+}
+
+func (p *windowsParser) ReadWrite(mode string) bool {
+	return strings.ToLower(mode) != "ro"
+}
+
+// IsVolumeNameValid checks a volume name in a platform specific manner.
+func (p *windowsParser) ValidateVolumeName(name string) error {
+	nameExp := regexp.MustCompile(`^` + rxName + `$`)
+	if !nameExp.MatchString(name) {
+		return errors.New("invalid volume name")
+	}
+	nameExp = regexp.MustCompile(`^` + rxReservedNames + `$`)
+	if nameExp.MatchString(name) {
+		return fmt.Errorf("volume name %q cannot be a reserved word for Windows filenames", name)
+	}
+	return nil
+}
+func (p *windowsParser) validateMountConfig(mnt *mount.Mount) error {
+	return p.validateMountConfigReg(mnt, rxDestination, windowsSpecificValidators)
+}
+
+type fileInfoProvider interface {
+	fileInfo(path string) (exist, isDir bool, err error)
+}
+
+type defaultFileInfoProvider struct {
+}
+
+func (defaultFileInfoProvider) fileInfo(path string) (exist, isDir bool, err error) {
+	fi, err := os.Stat(path)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return false, false, err
+		}
+		return false, false, nil
+	}
+	return true, fi.IsDir(), nil
+}
+
+var currentFileInfoProvider fileInfoProvider = defaultFileInfoProvider{}
+
+func (p *windowsParser) validateMountConfigReg(mnt *mount.Mount, destRegex string, additionalValidators ...mountValidator) error {
+
+	for _, v := range additionalValidators {
+		if err := v(mnt); err != nil {
+			return &errMountConfig{mnt, err}
+		}
+	}
+	if len(mnt.Target) == 0 {
+		return &errMountConfig{mnt, errMissingField("Target")}
+	}
+
+	if err := windowsValidateRegex(mnt.Target, destRegex); err != nil {
+		return &errMountConfig{mnt, err}
+	}
+
+	switch mnt.Type {
+	case mount.TypeBind:
+		if len(mnt.Source) == 0 {
+			return &errMountConfig{mnt, errMissingField("Source")}
+		}
+		// Don't error out just because the propagation mode is not supported on the platform
+		if opts := mnt.BindOptions; opts != nil {
+			if len(opts.Propagation) > 0 {
+				return &errMountConfig{mnt, fmt.Errorf("invalid propagation mode: %s", opts.Propagation)}
+			}
+		}
+		if mnt.VolumeOptions != nil {
+			return &errMountConfig{mnt, errExtraField("VolumeOptions")}
+		}
+
+		if err := windowsValidateAbsolute(mnt.Source); err != nil {
+			return &errMountConfig{mnt, err}
+		}
+
+		exists, isdir, err := currentFileInfoProvider.fileInfo(mnt.Source)
+		if err != nil {
+			return &errMountConfig{mnt, err}
+		}
+		if !exists {
+			return &errMountConfig{mnt, errBindNotExist}
+		}
+		if !isdir {
+			return &errMountConfig{mnt, fmt.Errorf("source path must be a directory")}
+		}
+
+	case mount.TypeVolume:
+		if mnt.BindOptions != nil {
+			return &errMountConfig{mnt, errExtraField("BindOptions")}
+		}
+
+		if len(mnt.Source) == 0 && mnt.ReadOnly {
+			return &errMountConfig{mnt, fmt.Errorf("must not set ReadOnly mode when using anonymous volumes")}
+		}
+
+		if len(mnt.Source) != 0 {
+			if err := p.ValidateVolumeName(mnt.Source); err != nil {
+				return &errMountConfig{mnt, err}
+			}
+		}
+	case mount.TypeNamedPipe:
+		if len(mnt.Source) == 0 {
+			return &errMountConfig{mnt, errMissingField("Source")}
+		}
+
+		if mnt.BindOptions != nil {
+			return &errMountConfig{mnt, errExtraField("BindOptions")}
+		}
+
+		if mnt.ReadOnly {
+			return &errMountConfig{mnt, errExtraField("ReadOnly")}
+		}
+
+		if windowsDetectMountType(mnt.Source) != mount.TypeNamedPipe {
+			return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Source)}
+		}
+
+		if windowsDetectMountType(mnt.Target) != mount.TypeNamedPipe {
+			return &errMountConfig{mnt, fmt.Errorf("'%s' is not a valid pipe path", mnt.Target)}
+		}
+	default:
+		return &errMountConfig{mnt, errors.New("mount type unknown")}
+	}
+	return nil
+}
+func (p *windowsParser) ParseMountRaw(raw, volumeDriver string) (*MountPoint, error) {
+	return p.parseMountRaw(raw, volumeDriver, rxDestination, true, windowsSpecificValidators)
+}
+
+func (p *windowsParser) parseMountRaw(raw, volumeDriver, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) {
+	arr, err := windowsSplitRawSpec(raw, destRegex)
+	if err != nil {
+		return nil, err
+	}
+
+	var spec mount.Mount
+	var mode string
+	switch len(arr) {
+	case 1:
+		// Just a destination path in the container
+		spec.Target = arr[0]
+	case 2:
+		if windowsValidMountMode(arr[1]) {
+			// Destination + Mode is not a valid volume - volumes
+			// cannot include a mode. e.g. /foo:rw
+			return nil, errInvalidSpec(raw)
+		}
+		// Host Source Path or Name + Destination
+		spec.Source = strings.Replace(arr[0], `/`, `\`, -1)
+		spec.Target = arr[1]
+	case 3:
+		// HostSourcePath+DestinationPath+Mode
+		spec.Source = strings.Replace(arr[0], `/`, `\`, -1)
+		spec.Target = arr[1]
+		mode = arr[2]
+	default:
+		return nil, errInvalidSpec(raw)
+	}
+	if convertTargetToBackslash {
+		spec.Target = strings.Replace(spec.Target, `/`, `\`, -1)
+	}
+
+	if !windowsValidMountMode(mode) {
+		return nil, errInvalidMode(mode)
+	}
+
+	spec.Type = windowsDetectMountType(spec.Source)
+	spec.ReadOnly = !p.ReadWrite(mode)
+
+	// cannot assume that if a volume driver is passed in that we should set it
+	if volumeDriver != "" && spec.Type == mount.TypeVolume {
+		spec.VolumeOptions = &mount.VolumeOptions{
+			DriverConfig: &mount.Driver{Name: volumeDriver},
+		}
+	}
+
+	if copyData, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet {
+		if spec.VolumeOptions == nil {
+			spec.VolumeOptions = &mount.VolumeOptions{}
+		}
+		spec.VolumeOptions.NoCopy = !copyData
+	}
+
+	mp, err := p.parseMountSpec(spec, destRegex, convertTargetToBackslash, additionalValidators...)
+	if mp != nil {
+		mp.Mode = mode
+	}
+	if err != nil {
+		err = fmt.Errorf("%v: %v", errInvalidSpec(raw), err)
+	}
+	return mp, err
+}
+
+func (p *windowsParser) ParseMountSpec(cfg mount.Mount) (*MountPoint, error) {
+	return p.parseMountSpec(cfg, rxDestination, true, windowsSpecificValidators)
+}
+func (p *windowsParser) parseMountSpec(cfg mount.Mount, destRegex string, convertTargetToBackslash bool, additionalValidators ...mountValidator) (*MountPoint, error) {
+	if err := p.validateMountConfigReg(&cfg, destRegex, additionalValidators...); err != nil {
+		return nil, err
+	}
+	mp := &MountPoint{
+		RW:          !cfg.ReadOnly,
+		Destination: cfg.Target,
+		Type:        cfg.Type,
+		Spec:        cfg,
+	}
+	if convertTargetToBackslash {
+		mp.Destination = strings.Replace(cfg.Target, `/`, `\`, -1)
+	}
+
+	switch cfg.Type {
+	case mount.TypeVolume:
+		if cfg.Source == "" {
+			mp.Name = stringid.GenerateNonCryptoID()
+		} else {
+			mp.Name = cfg.Source
+		}
+		mp.CopyData = p.DefaultCopyMode()
+
+		if cfg.VolumeOptions != nil {
+			if cfg.VolumeOptions.DriverConfig != nil {
+				mp.Driver = cfg.VolumeOptions.DriverConfig.Name
+			}
+			if cfg.VolumeOptions.NoCopy {
+				mp.CopyData = false
+			}
+		}
+	case mount.TypeBind:
+		mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1)
+	case mount.TypeNamedPipe:
+		mp.Source = strings.Replace(cfg.Source, `/`, `\`, -1)
+	}
+	// cleanup trailing `\` except for paths like `c:\`
+	if len(mp.Source) > 3 && mp.Source[len(mp.Source)-1] == '\\' {
+		mp.Source = mp.Source[:len(mp.Source)-1]
+	}
+	if len(mp.Destination) > 3 && mp.Destination[len(mp.Destination)-1] == '\\' {
+		mp.Destination = mp.Destination[:len(mp.Destination)-1]
+	}
+	return mp, nil
+}
+
+func (p *windowsParser) ParseVolumesFrom(spec string) (string, string, error) {
+	if len(spec) == 0 {
+		return "", "", fmt.Errorf("volumes-from specification cannot be an empty string")
+	}
+
+	specParts := strings.SplitN(spec, ":", 2)
+	id := specParts[0]
+	mode := "rw"
+
+	if len(specParts) == 2 {
+		mode = specParts[1]
+		if !windowsValidMountMode(mode) {
+			return "", "", errInvalidMode(mode)
+		}
+
+		// Do not allow copy modes on volumes-from
+		if _, isSet := getCopyMode(mode, p.DefaultCopyMode()); isSet {
+			return "", "", errInvalidMode(mode)
+		}
+	}
+	return id, mode, nil
+}
+
+func (p *windowsParser) DefaultPropagationMode() mount.Propagation {
+	return mount.Propagation("")
+}
+
+func (p *windowsParser) ConvertTmpfsOptions(opt *mount.TmpfsOptions, readOnly bool) (string, error) {
+	return "", fmt.Errorf("%s does not support tmpfs", runtime.GOOS)
+}
+func (p *windowsParser) DefaultCopyMode() bool {
+	return false
+}
+func (p *windowsParser) IsBackwardCompatible(m *MountPoint) bool {
+	return false
+}
+
+func (p *windowsParser) ValidateTmpfsMountDestination(dest string) error {
+	return errors.New("Platform does not support tmpfs")
+}